Commit 78a9de9

New API and new operator overloading issue (#81)
* Add `tf.nn.sparse_softmax_cross_entropy_with_logits()`.
* Mark test that tests wala#136.
* Update tests to reflect new API.
* Update test in light of wala#136.
1 parent e1ea532 commit 78a9de9
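
For context, `tf.nn.sparse_softmax_cross_entropy_with_logits` is the standard TensorFlow 2 API being modeled here. A minimal usage sketch (values are illustrative only, mirroring the test file added in this commit):

import tensorflow as tf

# Per-example logits for 3 examples over 4 classes (illustrative values).
logits = tf.constant(
    [[2.0, -5.0, 0.5, -0.1], [0.0, 0.0, 1.9, 1.4], [-100.0, 100.0, -100.0, -100.0]]
)
# One integer class label per example.
labels = tf.constant([0, 3, 1])

# Returns a float tensor of per-example losses, one entry per label.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(loss.shape)  # (3,)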

4 files changed: +37 −3
com.ibm.wala.cast.python.ml.test/source/com/ibm/wala/cast/python/ml/test/TestTensorflowModel.java (+13 −3)
@@ -217,7 +217,11 @@ public void testTf2()
     testTf2("tf2_test_dataset10.py", "add", 2, 2, 2, 3);
     testTf2("tf2_test_tensor_list.py", "add", 2, 2, 2, 3);
     testTf2("tf2_test_tensor_list2.py", "add", 0, 0);
-    testTf2("tf2_test_tensor_list3.py", "add", 0, 0);
+    testTf2(
+        "tf2_test_tensor_list3.py",
+        "add",
+        0,
+        0); // NOTE: Change to 2, 2, 2, 3 once https://github.com/wala/ML/issues/136 is fixed.
     testTf2("tf2_test_tensor_list4.py", "add", 0, 0);
     testTf2("tf2_test_tensor_list5.py", "add", 0, 0);
     testTf2("tf2_test_model_call.py", "SequentialModel.__call__", 1, 1, 3);
@@ -234,10 +238,10 @@ public void testTf2()
         "neural_network.py",
         "cross_entropy_loss",
         1,
-        4,
+        8,
         3); // NOTE: Change to 2 tensor parameters once https://github.com/wala/ML/issues/127 is
             // fixed. Values 2 and 3 will correspond to the tensor parameters.
-    testTf2("neural_network.py", "run_optimization", 2, 2, 2, 3);
+    testTf2("neural_network.py", "run_optimization", 2, 3, 2, 3);
     testTf2(
         "neural_network.py",
         "accuracy",
@@ -259,13 +263,19 @@ public void testTf2()
     testTf2("tf2_test_add5.py", "f", 1, 1, 2);
     testTf2("tf2_test_add6.py", "f", 1, 1, 2);
     testTf2("multigpu_training.py", "run_optimization", 2, 4, 2, 3);
+    testTf2(
+        "multigpu_training.py",
+        "average_gradients",
+        0,
+        0); // NOTE: Change to 1, 1, 2 once https://github.com/wala/ML/issues/136 is fixed.
     testTf2("tf2_test_reduce_mean.py", "f", 1, 1, 2);
     testTf2("tf2_test_reduce_mean.py", "g", 1, 1, 2);
     testTf2("tf2_test_reduce_mean.py", "h", 1, 1, 2);
     testTf2("tf2_test_gradient.py", "f", 1, 1, 2);
     testTf2("tf2_test_gradient2.py", "f", 1, 1, 2);
     testTf2("tf2_test_multiply.py", "f", 1, 1, 2);
     testTf2("tf2_test_multiply2.py", "f", 1, 1, 2);
+    testTf2("tf2_test_sparse_softmax_cross_entropy_with_logits.py", "f", 1, 1, 2);
   }

   private void testTf2(

com.ibm.wala.cast.python.ml/data/tensorflow.xml (+8)
@@ -75,6 +75,8 @@
       <putfield class="LRoot" field="conv3d" fieldType="LRoot" ref="nn" value="conv3d" />
       <new def="softmax" class="Ltensorflow/functions/softmax" />
       <putfield class="LRoot" field="softmax" fieldType="LRoot" ref="nn" value="softmax" />
+      <new def="sparse_softmax_cross_entropy_with_logits" class="Ltensorflow/functions/sparse_softmax_cross_entropy_with_logits" />
+      <putfield class="LRoot" field="sparse_softmax_cross_entropy_with_logits" fieldType="LRoot" ref="nn" value="sparse_softmax_cross_entropy_with_logits" />
       <new def="sigmoid" class="Ltensorflow/math/sigmoid" />
       <putfield class="LRoot" field="sigmoid" fieldType="LRoot" ref="nn" value="sigmoid" />
       <putfield class="LRoot" field="sigmoid" fieldType="LRoot" ref="math" value="sigmoid" />
@@ -697,6 +699,12 @@
         <return value="logits" />
       </method>
     </class>
+    <class name="sparse_softmax_cross_entropy_with_logits" allocatable="true">
+      <!-- https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits -->
+      <method name="do" descriptor="()LRoot;" numArgs="4" paramNames="self labels logits name">
+        <return value="labels" />
+      </method>
+    </class>
   </package>
   <package name="tensorflow/estimator">
     <class name="Estimator" allocatable="true">
tf2_test_sparse_softmax_cross_entropy_with_logits.py (new file, +14)
@@ -0,0 +1,14 @@
+# from https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits
+
+import tensorflow as tf
+
+
+def f(a):
+    pass
+
+
+logits = tf.constant(
+    [[2.0, -5.0, 0.5, -0.1], [0.0, 0.0, 1.9, 1.4], [-100.0, 100.0, -100.0, -100.0]]
+)
+labels = tf.constant([0, 3, 1])
+f(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits.numpy()))
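
As a sanity check on what this new test exercises, the fused op is equivalent to taking the negative log-softmax probability of each example's true class. A hypothetical cross-check (not part of the commit, and passing the tensor directly rather than logits.numpy() as the test does):

import tensorflow as tf

logits = tf.constant(
    [[2.0, -5.0, 0.5, -0.1], [0.0, 0.0, 1.9, 1.4], [-100.0, 100.0, -100.0, -100.0]]
)
labels = tf.constant([0, 3, 1])

# Built-in fused op.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)

# Manual equivalent: negative log-softmax probability of the true class per example.
log_probs = tf.nn.log_softmax(logits)
manual = -tf.gather(log_probs, labels, batch_dims=1)

tf.debugging.assert_near(loss, manual)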

com.ibm.wala.cast.python.test/data/tf2_test_tensor_list3.py (+2)
@@ -1,3 +1,5 @@
+# Test https://github.com/wala/ML/issues/136.
+
 import tensorflow as tf
 
 