diff --git a/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py b/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
index 4c8e8ebf489a..b0140793cf79 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
@@ -506,7 +506,6 @@ def test_big_query_write_insert_non_transient_api_call_error(self):
         equal_to(bq_result_errors))
 
   @pytest.mark.it_postcommit
-  @retry(reraise=True, stop=stop_after_attempt(3))
   @parameterized.expand([
       param(file_format=FileFormat.AVRO),
       param(file_format=FileFormat.JSON),
@@ -514,6 +513,7 @@ def test_big_query_write_insert_non_transient_api_call_error(self):
   ])
   @mock.patch(
       "apache_beam.io.gcp.bigquery_file_loads._MAXIMUM_SOURCE_URIS", new=1)
+  @retry(reraise=True, stop=stop_after_attempt(3))
   def test_big_query_write_temp_table_append_schema_update(self, file_format):
     """
     Test that nested schema update options and schema relaxation
diff --git a/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py b/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
index 52c525cc0eaf..52123516de1a 100644
--- a/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
+++ b/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
@@ -65,7 +65,7 @@ def predict(self, input: tf.Tensor, add=False):
 
 
 def _create_mult2_model():
-  inputs = tf.keras.Input(shape=(3))
+  inputs = tf.keras.Input(shape=(3, ))
   outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
   return tf.keras.Model(inputs=inputs, outputs=outputs)
 
@@ -127,7 +127,7 @@ def test_predict_tensor(self):
 
   def test_predict_tensor_with_batch_size(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2')
+    model_path = os.path.join(self.tmpdir, 'mult2.keras')
     tf.keras.models.save_model(model, model_path)
 
     with TestPipeline() as pipeline:
@@ -146,6 +146,7 @@ def fake_batching_inference_fn(
       model_handler = TFModelHandlerTensor(
           model_uri=model_path,
           inference_fn=fake_batching_inference_fn,
+          load_model_args={'safe_mode': False},
           min_batch_size=2,
           max_batch_size=2)
       examples = [
@@ -172,7 +173,7 @@ def fake_batching_inference_fn(
 
   def test_predict_tensor_with_large_model(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2')
+    model_path = os.path.join(self.tmpdir, 'mult2.keras')
     tf.keras.models.save_model(model, model_path)
 
     with TestPipeline() as pipeline:
@@ -193,6 +194,7 @@ def fake_batching_inference_fn(
       model_handler = TFModelHandlerTensor(
           model_uri=model_path,
           inference_fn=fake_batching_inference_fn,
+          load_model_args={'safe_mode': False},
          large_model=True)
       examples = [
          tf.convert_to_tensor(numpy.array([1.1, 2.2, 3.3], dtype='float32')),
@@ -218,7 +220,7 @@ def fake_batching_inference_fn(
 
   def test_predict_numpy_with_batch_size(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2_numpy')
+    model_path = os.path.join(self.tmpdir, 'mult2_numpy.keras')
     tf.keras.models.save_model(model, model_path)
 
     with TestPipeline() as pipeline:
@@ -237,6 +239,7 @@ def fake_batching_inference_fn(
      model_handler = TFModelHandlerNumpy(
          model_uri=model_path,
          inference_fn=fake_batching_inference_fn,
+          load_model_args={'safe_mode': False},
          min_batch_size=2,
          max_batch_size=2)
       examples = [
@@ -260,7 +263,7 @@ def fake_batching_inference_fn(
 
   def test_predict_numpy_with_large_model(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2_numpy')
+    model_path = os.path.join(self.tmpdir, 'mult2_numpy.keras')
     tf.keras.models.save_model(model, model_path)
 
     with TestPipeline() as pipeline:
@@ -280,6 +283,7 @@ def fake_inference_fn(
       model_handler = TFModelHandlerNumpy(
           model_uri=model_path,
+          load_model_args={'safe_mode': False},
           inference_fn=fake_inference_fn,
           large_model=True)
       examples = [
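
The reordering in bigquery_write_it_test.py matters because decorators apply bottom-up: with @retry moved below @mock.patch, the retry attaches to the underlying test function before @parameterized.expand generates the per-format cases, so each generated case is retried on its own. The tensorflow_inference_test.py changes track Keras 3 behavior: tf.keras.Input expects a tuple for shape ((3) is just the integer 3), save_model infers the native Keras archive format from the .keras extension, and models containing Lambda layers refuse to load while safe mode is on. A minimal sketch of that load path, assuming TensorFlow 2.16+ (Keras 3); the /tmp path is illustrative:

    import tensorflow as tf

    # `shape` must be a tuple: `(3)` is the scalar 3, `(3, )` is a 1-tuple.
    inputs = tf.keras.Input(shape=(3, ))
    outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    # The `.keras` suffix selects the native Keras v3 archive format.
    model.save('/tmp/mult2.keras')

    # Lambda layers deserialize arbitrary Python, so Keras 3 raises on load
    # unless safe mode is explicitly disabled. The diff forwards this flag
    # through the model handlers' `load_model_args`, which are passed as
    # keyword arguments to `tf.keras.models.load_model`.
    restored = tf.keras.models.load_model('/tmp/mult2.keras', safe_mode=False)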