New API and new operator overloading issue #81

Merged: 11 commits, Jan 31, 2024
@@ -217,7 +217,11 @@ public void testTf2()
testTf2("tf2_test_dataset10.py", "add", 2, 2, 2, 3);
testTf2("tf2_test_tensor_list.py", "add", 2, 2, 2, 3);
testTf2("tf2_test_tensor_list2.py", "add", 0, 0);
testTf2("tf2_test_tensor_list3.py", "add", 0, 0);
testTf2(
"tf2_test_tensor_list3.py",
"add",
0,
0); // NOTE: Change to 2, 2, 2, 3 once https://github.com/wala/ML/issues/136 is fixed.
testTf2("tf2_test_tensor_list4.py", "add", 0, 0);
testTf2("tf2_test_tensor_list5.py", "add", 0, 0);
testTf2("tf2_test_model_call.py", "SequentialModel.__call__", 1, 1, 3);
@@ -234,10 +238,10 @@ public void testTf2()
"neural_network.py",
"cross_entropy_loss",
1,
4,
8,
3); // NOTE: Change to 2 tensor parameters once https://github.com/wala/ML/issues/127 is
// fixed. Values 2 and 3 will correspond to the tensor parameters.
testTf2("neural_network.py", "run_optimization", 2, 2, 2, 3);
testTf2("neural_network.py", "run_optimization", 2, 3, 2, 3);
testTf2(
"neural_network.py",
"accuracy",
@@ -259,13 +263,19 @@ public void testTf2()
testTf2("tf2_test_add5.py", "f", 1, 1, 2);
testTf2("tf2_test_add6.py", "f", 1, 1, 2);
testTf2("multigpu_training.py", "run_optimization", 2, 4, 2, 3);
testTf2(
"multigpu_training.py",
"average_gradients",
0,
0); // NOTE: Change to 1, 1, 2 once https://github.com/wala/ML/issues/136 is fixed.
testTf2("tf2_test_reduce_mean.py", "f", 1, 1, 2);
testTf2("tf2_test_reduce_mean.py", "g", 1, 1, 2);
testTf2("tf2_test_reduce_mean.py", "h", 1, 1, 2);
testTf2("tf2_test_gradient.py", "f", 1, 1, 2);
testTf2("tf2_test_gradient2.py", "f", 1, 1, 2);
testTf2("tf2_test_multiply.py", "f", 1, 1, 2);
testTf2("tf2_test_multiply2.py", "f", 1, 1, 2);
testTf2("tf2_test_sparse_softmax_cross_entropy_with_logits.py", "f", 1, 1, 2);
}

private void testTf2(
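For orientation: the integer arguments to testTf2 encode the analysis results expected for each script, and, per the in-line NOTE above, the trailing values (e.g. 2 and 3) are the value numbers of the parameters that should be inferred as tensors. A purely hypothetical script matching an expectation such as 1, 1, 2 (one tensor parameter, at value number 2) could look like the sketch below, modeled on the new test file added later in this PR; the function body and the constant are illustrative only, not part of the change.

import tensorflow as tf


def f(a):
    # The analysis is expected to infer that the parameter 'a' holds a tensor.
    pass


f(tf.constant([1.0, 2.0]))  # a concrete tensor flows into f's parameter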
8 changes: 8 additions & 0 deletions com.ibm.wala.cast.python.ml/data/tensorflow.xml
@@ -75,6 +75,8 @@
<putfield class="LRoot" field="conv3d" fieldType="LRoot" ref="nn" value="conv3d" />
<new def="softmax" class="Ltensorflow/functions/softmax" />
<putfield class="LRoot" field="softmax" fieldType="LRoot" ref="nn" value="softmax" />
<new def="sparse_softmax_cross_entropy_with_logits" class="Ltensorflow/functions/sparse_softmax_cross_entropy_with_logits" />
<putfield class="LRoot" field="sparse_softmax_cross_entropy_with_logits" fieldType="LRoot" ref="nn" value="sparse_softmax_cross_entropy_with_logits" />
<new def="sigmoid" class="Ltensorflow/math/sigmoid" />
<putfield class="LRoot" field="sigmoid" fieldType="LRoot" ref="nn" value="sigmoid" />
<putfield class="LRoot" field="sigmoid" fieldType="LRoot" ref="math" value="sigmoid" />
@@ -697,6 +699,12 @@
<return value="logits" />
</method>
</class>
<class name="sparse_softmax_cross_entropy_with_logits" allocatable="true">
<!-- https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits -->
<method name="do" descriptor="()LRoot;" numArgs="4" paramNames="self labels logits name">
<return value="labels" />
</method>
</class>
</package>
<package name="tensorflow/estimator">
<class name="Estimator" allocatable="true">
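The two new putfield entries above register the summary on the tf.nn namespace, and the new class models the call itself. A minimal call matching the modeled parameter names (self, labels, logits, name) would be the sketch below; it is illustrative only, not taken from the PR, and the name argument is optional in TensorFlow.

import tensorflow as tf

labels = tf.constant([0, 3, 1])
logits = tf.random.uniform([3, 4])
# Keyword form mirroring the summary's paramNames: labels, logits, name.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits, name="xent"
)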
@@ -0,0 +1,14 @@
# from https://www.tensorflow.org/versions/r2.9/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits

import tensorflow as tf


def f(a):
pass


logits = tf.constant(
[[2.0, -5.0, 0.5, -0.1], [0.0, 0.0, 1.9, 1.4], [-100.0, 100.0, -100.0, -100.0]]
)
labels = tf.constant([0, 3, 1])
f(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits.numpy()))
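For reference, and assuming the TF 2.9 behavior described in the linked documentation: with labels of shape [3] and logits of shape [3, 4], the op returns a per-example loss of shape [3], and logits.numpy() converts the logits tensor to a NumPy array before the call, so the tensor that reaches f is the op's return value. A small sketch of that shape behavior (not part of the PR):

import tensorflow as tf

logits = tf.constant(
    [[2.0, -5.0, 0.5, -0.1], [0.0, 0.0, 1.9, 1.4], [-100.0, 100.0, -100.0, -100.0]]
)
labels = tf.constant([0, 3, 1])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(loss.shape)  # (3,) -- one loss value per example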
2 changes: 2 additions & 0 deletions com.ibm.wala.cast.python.test/data/tf2_test_tensor_list3.py
@@ -1,3 +1,5 @@
# Test https://github.com/wala/ML/issues/136.

import tensorflow as tf


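The remainder of tf2_test_tensor_list3.py is collapsed in this view. As a purely hypothetical illustration of the tensor-list pattern that https://github.com/wala/ML/issues/136 appears to concern (tensors reaching a function through a Python list), such a script might resemble the sketch below; the actual file contents may differ.

import tensorflow as tf


def add(a, b):
    # Once issue 136 is fixed, both parameters should be inferred as tensors
    # (matching the expected values 2, 2, 2, 3 noted in the test above).
    return a + b


tensors = [tf.constant(1.0), tf.constant(2.0)]
add(tensors[0], tensors[1])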