@@ -44,8 +44,6 @@ def test_load_tensorflow_repository_from_hf():
     # folder contains all config files and pytorch_model.bin
     folder_contents = os.listdir(storage_folder)
     assert "pytorch_model.bin" not in folder_contents
-    # custom requirements.txt for custom handler
-    assert "requirements.txt" in folder_contents
     # filter framework
     assert "tf_model.h5" in folder_contents
     # revision doesn't have tokenizer
@@ -62,8 +60,6 @@ def test_load_onnx_repository_from_hf():
     # folder contains all config files and pytorch_model.bin
     folder_contents = os.listdir(storage_folder)
     assert "pytorch_model.bin" not in folder_contents
-    # custom requirements.txt for custom handler
-    assert "requirements.txt" in folder_contents
     # filter framework
     assert "tf_model.h5" not in folder_contents
     # onnx model
@@ -85,8 +81,6 @@ def test_load_pytorch_repository_from_hf():
     # folder contains all config files and pytorch_model.bin
     folder_contents = os.listdir(storage_folder)
     assert "pytorch_model.bin" in folder_contents
-    # custom requirements.txt for custom handler
-    assert "requirements.txt" in folder_contents
     # filter framework
     assert "tf_model.h5" not in folder_contents
     # revision doesn't have tokenizer
@@ -126,7 +120,6 @@ def test_get_pipeline():
 @require_torch
 def test_whisper_long_audio():
     with tempfile.TemporaryDirectory() as tmpdirname:
-
         storage_dir = _load_repository_from_hf("openai/whisper-tiny", tmpdirname, framework="pytorch")
         pipe = get_pipeline("automatic-speech-recognition", storage_dir.as_posix())
         res = pipe(os.path.join(os.getcwd(), "tests/resources/audio", "long_sample.mp3"))