diff --git a/apps/common-app/src/examples/AudioTag/AudioTag.tsx b/apps/common-app/src/examples/AudioTag/AudioTag.tsx
new file mode 100644
index 000000000..9ee2dac7d
--- /dev/null
+++ b/apps/common-app/src/examples/AudioTag/AudioTag.tsx
@@ -0,0 +1,25 @@
+import React, { useRef } from 'react';
+import { View } from 'react-native';
+import { Audio } from 'react-native-audio-api/development/react';
+
+import { Container } from '../../components';
+import { AudioContext } from 'react-native-audio-api';
+
+const DEMO_AUDIO_URL =
+ 'https://software-mansion.github.io/react-native-audio-api/audio/music/example-music-02.mp3';
+ // '/data/data/com.fabricexample/cache/audio.wav';
+
+const AudioTag: React.FC = () => {
+ const audioContext = useRef(new AudioContext());
+ return (
+    <Container centered>
+      <View>
+        <Audio
+          context={audioContext.current}
+          src={DEMO_AUDIO_URL} />
+      </View>
+    </Container>
+ );
+};
+
+export default AudioTag;
diff --git a/apps/common-app/src/examples/AudioTag/index.ts b/apps/common-app/src/examples/AudioTag/index.ts
new file mode 100644
index 000000000..ad47e827f
--- /dev/null
+++ b/apps/common-app/src/examples/AudioTag/index.ts
@@ -0,0 +1 @@
+export { default } from './AudioTag';
diff --git a/apps/common-app/src/examples/index.ts b/apps/common-app/src/examples/index.ts
index 554dcaf49..c62486643 100644
--- a/apps/common-app/src/examples/index.ts
+++ b/apps/common-app/src/examples/index.ts
@@ -12,6 +12,7 @@ import PlaybackSpeed from './PlaybackSpeed/PlaybackSpeed';
import Record from './Record/Record';
import Streaming from './Streaming/Streaming';
import Worklets from './Worklets/Worklets';
+import AudioStream from './AudioTag/AudioTag';
type NavigationParamList = {
Oscillator: undefined;
@@ -26,6 +27,7 @@ type NavigationParamList = {
Record: undefined;
Worklets: undefined;
Streamer: undefined;
+ AudioTag: undefined;
};
export type ExampleKey = keyof NavigationParamList;
@@ -110,4 +112,10 @@ export const Examples: Example[] = [
Icon: icons.Radio,
screen: Streaming,
},
+ {
+ key: 'AudioTag',
+ title: 'Audio Tag',
+ Icon: icons.Tag,
+ screen: AudioStream,
+ }
] as const;
diff --git a/apps/fabric-example/ios/Podfile.lock b/apps/fabric-example/ios/Podfile.lock
index a344b24dc..0ff51c1dc 100644
--- a/apps/fabric-example/ios/Podfile.lock
+++ b/apps/fabric-example/ios/Podfile.lock
@@ -2514,7 +2514,7 @@ EXTERNAL SOURCES:
SPEC CHECKSUMS:
FBLazyVector: e97c19a5a442429d1988f182a1940fb08df514da
- hermes-engine: ca0c1d4fe0200e05fedd8d7c0c283b54cd461436
+ hermes-engine: 471e81260adadffc041e40c5eea01333addabb53
RCTDeprecation: af44b104091a34482596cd9bd7e8d90c4e9b4bd7
RCTRequired: bb77b070f75f53398ce43c0aaaa58337cebe2bf6
RCTSwiftUI: afc0a0a635860da1040a0b894bfd529da06d7810
@@ -2523,7 +2523,7 @@ SPEC CHECKSUMS:
React: 1ba7d364ade7d883a1ec055bfc3606f35fdee17b
React-callinvoker: bc2a26f8d84fb01f003fc6de6c9337b64715f95b
React-Core: 7840d3a80b43a95c5e80ef75146bd70925ebab0f
- React-Core-prebuilt: e44365cf4785c3aa56ababc9ab204fe8bc6b17d0
+ React-Core-prebuilt: 6586031f606ff8ab466cac9e8284053a91342881
React-CoreModules: 2eb010400b63b89e53a324ffb3c112e4c7c3ce42
React-cxxreact: a558e92199d26f145afa9e62c4233cf8e7950efe
React-debug: 755200a6e7f5e6e0a40ff8d215493d43cce285fc
@@ -2587,7 +2587,7 @@ SPEC CHECKSUMS:
ReactAppDependencyProvider: e96e93b493d8d86eeaee3e590ba0be53f6abe46f
ReactCodegen: f66521b131699d6af0790f10653933b3f1f79a6f
ReactCommon: 07572bf9e687c8a52fbe4a3641e9e3a1a477c78e
- ReactNativeDependencies: 3467a1fea6f7a524df13b30430bebcc254d9aee2
+ ReactNativeDependencies: a5d71d95f2654107eb45e6ece04caba36beac2bd
RNAudioAPI: fa5c075d2fcdb1ad9a695754b38f07c8c3074396
RNGestureHandler: 07de6f059e0ee5744ae9a56feb07ee345338cc31
RNReanimated: d75c81956bf7531fe08ba4390149002ab8bdd127
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
index f4c22c8b2..4aec47875 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
@@ -14,6 +14,7 @@
#include
#include
#include
+#include <audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.h>
#include
#include
#include
@@ -58,6 +59,7 @@ BaseAudioContextHostObject::BaseAudioContextHostObject(
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBiquadFilter),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createIIRFilter),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBufferSource),
+ JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createFileSource),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBufferQueueSource),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createPeriodicWave),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createConvolver),
@@ -250,6 +252,26 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createBufferSource) {
return jsi::Object::createFromHostObject(runtime, bufferSourceHostObject);
}
+JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createFileSource) {
+ AudioFileSourceOptions options;
+ if (count > 0 && !args[0].isUndefined() && !args[0].isNull()) {
+ if (args[0].isString()) {
+ options.filePath = args[0].getString(runtime).utf8(runtime);
+ } else {
+ auto obj = args[0].asObject(runtime);
+ if (obj.isArrayBuffer(runtime)) {
+ auto arrayBuffer = obj.getArrayBuffer(runtime);
+ auto *data = arrayBuffer.data(runtime);
+ auto size = arrayBuffer.size(runtime);
+ options.data = std::vector<uint8_t>(data, data + size);
+ }
+ }
+ }
+ const auto fileSourceHostObject =
+ std::make_shared<AudioFileSourceNodeHostObject>(context_, options);
+ return jsi::Object::createFromHostObject(runtime, fileSourceHostObject);
+}
+
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createBufferQueueSource) {
const auto options = args[0].asObject(runtime);
const auto baseAudioBufferSourceOptions =
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h
index 052538058..4041f8f51 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h
@@ -39,6 +39,7 @@ class BaseAudioContextHostObject : public JsiHostObject {
JSI_HOST_FUNCTION_DECL(createBiquadFilter);
JSI_HOST_FUNCTION_DECL(createIIRFilter);
JSI_HOST_FUNCTION_DECL(createBufferSource);
+ JSI_HOST_FUNCTION_DECL(createFileSource);
JSI_HOST_FUNCTION_DECL(createBufferQueueSource);
JSI_HOST_FUNCTION_DECL(createPeriodicWave);
JSI_HOST_FUNCTION_DECL(createAnalyser);
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.cpp
new file mode 100644
index 000000000..e9218f6c7
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.cpp
@@ -0,0 +1,132 @@
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+namespace audioapi {
+
+AudioFileSourceNodeHostObject::AudioFileSourceNodeHostObject(
+ const std::shared_ptr &context,
+ const AudioFileSourceOptions &options)
+ : AudioNodeHostObject(context->createFileSource(options), options) {
+ addGetters(
+ JSI_EXPORT_PROPERTY_GETTER(AudioFileSourceNodeHostObject, volume),
+ JSI_EXPORT_PROPERTY_GETTER(AudioFileSourceNodeHostObject, loop),
+ JSI_EXPORT_PROPERTY_GETTER(AudioFileSourceNodeHostObject, currentTime),
+ JSI_EXPORT_PROPERTY_GETTER(AudioFileSourceNodeHostObject, duration));
+ addSetters(
+ JSI_EXPORT_PROPERTY_SETTER(AudioFileSourceNodeHostObject, onPositionChanged),
+ JSI_EXPORT_PROPERTY_SETTER(AudioFileSourceNodeHostObject, onEnded),
+ JSI_EXPORT_PROPERTY_SETTER(AudioFileSourceNodeHostObject, volume),
+ JSI_EXPORT_PROPERTY_SETTER(AudioFileSourceNodeHostObject, loop));
+
+ addFunctions(
+ JSI_EXPORT_FUNCTION(AudioFileSourceNodeHostObject, pause),
+ JSI_EXPORT_FUNCTION(AudioFileSourceNodeHostObject, start),
+ JSI_EXPORT_FUNCTION(AudioFileSourceNodeHostObject, seekToTime));
+}
+
+AudioFileSourceNodeHostObject::~AudioFileSourceNodeHostObject() {
+ setOnPositionChangedCallbackId(0);
+ setOnEndedCallbackId(0);
+}
+
+JSI_PROPERTY_GETTER_IMPL(AudioFileSourceNodeHostObject, volume) {
+ auto node = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ return {node->getVolume()};
+}
+
+JSI_PROPERTY_SETTER_IMPL(AudioFileSourceNodeHostObject, volume) {
+ auto node = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ node->setVolume(static_cast<float>(value.getNumber()));
+}
+
+JSI_PROPERTY_GETTER_IMPL(AudioFileSourceNodeHostObject, loop) {
+ auto node = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ return {node->getLoop()};
+}
+
+JSI_PROPERTY_SETTER_IMPL(AudioFileSourceNodeHostObject, loop) {
+ auto node = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ node->setLoop(value.getBool());
+}
+
+JSI_PROPERTY_GETTER_IMPL(AudioFileSourceNodeHostObject, currentTime) {
+ auto node = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ return {node->getCurrentTime()};
+}
+
+JSI_PROPERTY_GETTER_IMPL(AudioFileSourceNodeHostObject, duration) {
+ auto node = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ return {node->getDuration()};
+}
+
+JSI_HOST_FUNCTION_IMPL(AudioFileSourceNodeHostObject, start) {
+ auto audioFileSourceNode = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ auto event = [audioFileSourceNode](BaseAudioContext &) {
+ audioFileSourceNode->start();
+ };
+ audioFileSourceNode->scheduleAudioEvent(std::move(event));
+
+ return jsi::Value::undefined();
+}
+
+JSI_HOST_FUNCTION_IMPL(AudioFileSourceNodeHostObject, pause) {
+ auto audioFileSourceNode = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ audioFileSourceNode->pause();
+ return jsi::Value::undefined();
+}
+
+JSI_HOST_FUNCTION_IMPL(AudioFileSourceNodeHostObject, seekToTime) {
+ auto audioFileSourceNode = std::static_pointer_cast<AudioFileSourceNode>(node_);
+ if (count < 1 || !args[0].isNumber()) {
+ return jsi::Value::undefined();
+ }
+ const double t = args[0].getNumber();
+
+ auto event = [audioFileSourceNode, t](BaseAudioContext &) {
+ audioFileSourceNode->seekToTime(t);
+ };
+ audioFileSourceNode->scheduleAudioEvent(std::move(event));
+
+ return jsi::Value::undefined();
+}
+
+JSI_PROPERTY_SETTER_IMPL(AudioFileSourceNodeHostObject, onPositionChanged) {
+ auto callbackId = std::stoull(value.getString(runtime).utf8(runtime));
+ setOnPositionChangedCallbackId(callbackId);
+}
+
+void AudioFileSourceNodeHostObject::setOnPositionChangedCallbackId(uint64_t callbackId) {
+ auto sourceNode = std::static_pointer_cast<AudioFileSourceNode>(node_);
+
+ auto event = [sourceNode, callbackId](BaseAudioContext &) {
+ sourceNode->setOnPositionChangedCallbackId(callbackId);
+ };
+
+ sourceNode->unregisterOnPositionChangedCallback(onPositionChangedCallbackId_);
+ sourceNode->scheduleAudioEvent(std::move(event));
+ onPositionChangedCallbackId_ = callbackId;
+}
+
+JSI_PROPERTY_SETTER_IMPL(AudioFileSourceNodeHostObject, onEnded) {
+ auto callbackId = std::stoull(value.getString(runtime).utf8(runtime));
+ setOnEndedCallbackId(callbackId);
+}
+
+void AudioFileSourceNodeHostObject::setOnEndedCallbackId(uint64_t callbackId) {
+ auto sourceNode = std::static_pointer_cast<AudioFileSourceNode>(node_);
+
+ auto event = [sourceNode, callbackId](BaseAudioContext &) {
+ sourceNode->setOnEndedCallbackId(callbackId);
+ };
+
+ sourceNode->unregisterOnEndedCallback(onEndedCallbackId_);
+ sourceNode->scheduleAudioEvent(std::move(event));
+ onEndedCallbackId_ = callbackId;
+}
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.h
new file mode 100644
index 000000000..85a44b30c
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.h
@@ -0,0 +1,43 @@
+#pragma once
+
+#include
+
+#include
+
+namespace audioapi {
+using namespace facebook;
+
+struct AudioFileSourceOptions;
+class BaseAudioContext;
+
+class AudioFileSourceNodeHostObject : public AudioNodeHostObject {
+ public:
+ explicit AudioFileSourceNodeHostObject(
+ const std::shared_ptr &context,
+ const AudioFileSourceOptions &options);
+
+ ~AudioFileSourceNodeHostObject() override;
+
+ JSI_PROPERTY_GETTER_DECL(volume);
+ JSI_PROPERTY_SETTER_DECL(volume);
+ JSI_PROPERTY_GETTER_DECL(loop);
+ JSI_PROPERTY_SETTER_DECL(loop);
+ JSI_PROPERTY_GETTER_DECL(currentTime);
+ JSI_PROPERTY_GETTER_DECL(duration);
+ JSI_PROPERTY_SETTER_DECL(onPositionChanged);
+ JSI_PROPERTY_SETTER_DECL(onEnded);
+
+ JSI_HOST_FUNCTION_DECL(pause);
+ JSI_HOST_FUNCTION_DECL(start);
+ JSI_HOST_FUNCTION_DECL(seekToStart);
+ JSI_HOST_FUNCTION_DECL(seekToTime);
+
+ private:
+ uint64_t onPositionChangedCallbackId_ = 0;
+ uint64_t onEndedCallbackId_ = 0;
+
+ void setOnPositionChangedCallbackId(uint64_t callbackId);
+ void setOnEndedCallbackId(uint64_t callbackId);
+};
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
index 67e4013e0..fe978c05e 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
@@ -12,6 +12,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -177,6 +178,15 @@ std::shared_ptr BaseAudioContext::createBufferSource(
return bufferSource;
}
+#if !RN_AUDIO_API_TEST
+std::shared_ptr<AudioFileSourceNode> BaseAudioContext::createFileSource(
+ const AudioFileSourceOptions &options) {
+ auto fileSource = std::make_shared<AudioFileSourceNode>(shared_from_this(), options);
+ // graphManager_->addProcessingNode(fileSource);
+ return fileSource;
+}
+#endif // RN_AUDIO_API_TEST
+
std::shared_ptr BaseAudioContext::createIIRFilter(const IIRFilterOptions &options) {
auto iirFilter = std::make_shared(shared_from_this(), options);
graphManager_->addProcessingNode(iirFilter);
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
index e3f52fd28..4e2d97e05 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
@@ -28,6 +28,7 @@ class IIRFilterNode;
class AudioDestinationNode;
class AudioBufferSourceNode;
class AudioBufferQueueSourceNode;
+class AudioFileSourceNode;
class AnalyserNode;
class AudioEventHandlerRegistry;
class ConvolverNode;
@@ -47,6 +48,7 @@ struct BiquadFilterOptions;
struct OscillatorOptions;
struct BaseAudioBufferSourceOptions;
struct AudioBufferSourceOptions;
+struct AudioFileSourceOptions;
struct StreamerOptions;
struct DelayOptions;
struct IIRFilterOptions;
@@ -93,6 +95,9 @@ class BaseAudioContext : public std::enable_shared_from_this {
std::shared_ptr createBiquadFilter(const BiquadFilterOptions &options);
std::shared_ptr createBufferSource(
const AudioBufferSourceOptions &options);
+#if !RN_AUDIO_API_TEST
+ std::shared_ptr<AudioFileSourceNode> createFileSource(const AudioFileSourceOptions &options);
+#endif // RN_AUDIO_API_TEST
std::shared_ptr createBufferQueueSource(
const BaseAudioBufferSourceOptions &options);
std::shared_ptr createPeriodicWave(
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.cpp
new file mode 100644
index 000000000..8e7817fa8
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.cpp
@@ -0,0 +1,344 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace audioapi {
+
+AudioFileSourceNode::AudioFileSourceNode(
+ const std::shared_ptr &context,
+ const AudioFileSourceOptions &options)
+ : AudioNode(context, options),
+ audioEventHandlerRegistry_(context->getAudioEventHandlerRegistry()),
+ onPositionChangedInterval_(
+ static_cast<int>(context->getSampleRate() * ON_POSITION_CHANGED_INTERVAL)) {
+ const bool useFilePath = !options.filePath.empty();
+ const bool useData = !options.data.empty();
+
+ if (useFilePath || useData) {
+ auto state = std::make_shared();
+ if (useData) {
+ state->memoryData = options.data;
+ }
+ if (useFilePath) {
+ state->filePath = options.filePath;
+ requiresFFmpeg_ = AudioDecoder::pathHasExtension(options.filePath, {".mp4", ".m4a", ".aac"});
+ } else {
+ auto format = AudioDecoder::detectAudioFormat(options.data.data(), options.data.size());
+ requiresFFmpeg_ =
+ format == AudioFormat::MP4 || format == AudioFormat::M4A || format == AudioFormat::AAC;
+ }
+ initDecoders(useFilePath, context, state);
+ }
+
+ if (decoderState_ == nullptr) {
+ assert(false && "cannot initialize decoder");
+ return;
+ }
+
+ seekOffloader_ = std::make_unique>(
+ SEEK_OFFLOADER_WORKER_COUNT, [this](OffloadedSeekRequest req) { runOffloadedSeekTask(req); });
+
+ isInitialized_.store(true, std::memory_order_release);
+}
+
+void AudioFileSourceNode::setOnPositionChangedCallbackId(uint64_t callbackId) {
+ onPositionChangedCallbackId_ = callbackId;
+}
+
+void AudioFileSourceNode::unregisterOnPositionChangedCallback(uint64_t callbackId) {
+ audioEventHandlerRegistry_->unregisterHandler(AudioEvent::POSITION_CHANGED, callbackId);
+}
+
+void AudioFileSourceNode::setOnEndedCallbackId(uint64_t callbackId) {
+ onEndedCallbackId_ = callbackId;
+}
+
+void AudioFileSourceNode::unregisterOnEndedCallback(uint64_t callbackId) {
+ audioEventHandlerRegistry_->unregisterHandler(AudioEvent::ENDED, callbackId);
+}
+
+void AudioFileSourceNode::sendOnPositionChangedEvent(int samplesWritten) {
+ currentTime_.fetch_add(samplesWritten / sampleRate_);
+ if (onPositionChangedCallbackId_ != 0 &&
+ (onPositionChangedFlush_.load(std::memory_order_acquire) ||
+ onPositionChangedTime_ > onPositionChangedInterval_)) {
+ std::unordered_map body = {{"value", getCurrentTime()}};
+
+ audioEventHandlerRegistry_->invokeHandlerWithEventBody(
+ AudioEvent::POSITION_CHANGED, onPositionChangedCallbackId_, body);
+
+ onPositionChangedTime_ = 0;
+ onPositionChangedFlush_.store(false, std::memory_order_release);
+ }
+
+ onPositionChangedTime_ += samplesWritten;
+}
+
+void AudioFileSourceNode::sendOnEndedEvent() {
+ if (onEndedCallbackId_ != 0) {
+ audioEventHandlerRegistry_->invokeHandlerWithEventBody(
+ AudioEvent::ENDED, onEndedCallbackId_, {});
+ }
+}
+
+void AudioFileSourceNode::initDecoders(
+ bool useFilePath,
+ const std::shared_ptr &context,
+ const std::shared_ptr &state) {
+ if (requiresFFmpeg_) {
+#if RN_AUDIO_API_FFMPEG_DISABLED
+ assert(false && "File codec is not supported when FFmpeg is disabled");
+#else
+ ffmpegdecoder::ffmpegDecoderConfigInit(&cfg, static_cast(context->getSampleRate()));
+ bool result;
+ if (useFilePath) {
+ result = ffmpegDecoder_.openFile(cfg, state->filePath);
+ } else {
+ result = ffmpegDecoder_.openMemory(cfg, state->memoryData.data(), state->memoryData.size());
+ }
+ if (result) {
+ state->channels = ffmpegDecoder_.outputChannels();
+ state->sampleRate = static_cast<float>(ffmpegDecoder_.outputSampleRate());
+ duration_.store(ffmpegDecoder_.getDurationInSeconds(), std::memory_order_release);
+ } else {
+ ffmpegDecoder_.close();
+ }
+#endif // RN_AUDIO_API_FFMPEG_DISABLED
+ } else {
+ ma_decoder_config config =
+ ma_decoder_config_init(ma_format_f32, 0, static_cast<ma_uint32>(context->getSampleRate()));
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+ ma_decoding_backend_vtable *customBackends[] = {
+ ma_decoding_backend_libvorbis, ma_decoding_backend_libopus};
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay)
+ config.ppCustomBackendVTables = customBackends;
+ config.customBackendCount = sizeof(customBackends) / sizeof(customBackends[0]);
+
+ maDecoder_ = std::make_unique();
+ ma_result result;
+ if (useFilePath) {
+ result = ma_decoder_init_file(state->filePath.c_str(), &config, maDecoder_.get());
+ } else {
+ result = ma_decoder_init_memory(
+ state->memoryData.data(), state->memoryData.size(), &config, maDecoder_.get());
+ }
+
+ if (result == MA_SUCCESS) {
+ state->channels = static_cast<int>(maDecoder_->outputChannels);
+ state->sampleRate = static_cast<float>(maDecoder_->outputSampleRate);
+ ma_uint64 length = 0;
+ if (ma_decoder_get_length_in_pcm_frames(maDecoder_.get(), &length) == MA_SUCCESS) {
+ duration_.store(static_cast<double>(length) / state->sampleRate, std::memory_order_release);
+ }
+ } else {
+ ma_decoder_uninit(maDecoder_.get());
+ maDecoder_.reset();
+ }
+ }
+ state->interleavedBuffer.resize(static_cast<size_t>(RENDER_QUANTUM_SIZE) * state->channels);
+ decoderState_ = state;
+ channelCount_ = decoderState_->channels;
+ sampleRate_ = decoderState_->sampleRate;
+}
+
+// Same as AudioScheduledSourceNode::start: start or resume the native engine
+void AudioFileSourceNode::start() {
+ filePaused_.store(false, std::memory_order_release);
+
+ if (std::shared_ptr<BaseAudioContext> ctx = context_.lock();
+ auto *audioContext = dynamic_cast<AudioContext *>(ctx.get())) {
+ if (audioContext->getState() != ContextState::RUNNING) {
+ audioContext->start();
+ }
+ }
+}
+
+void AudioFileSourceNode::pause() {
+ filePaused_.store(true, std::memory_order_release);
+}
+
+void AudioFileSourceNode::disable() {
+ seekOffloader_.reset();
+ filePaused_.store(false, std::memory_order_release);
+ if (requiresFFmpeg_) {
+ ffmpegDecoder_.close();
+ } else if (maDecoder_ != nullptr) {
+ ma_decoder_uninit(maDecoder_.get());
+ maDecoder_.reset();
+ }
+}
+
+size_t AudioFileSourceNode::readFrames(float *buf, size_t frameCount) {
+ if (pendingOffloadedSeeks_.load(std::memory_order_acquire) > 0) {
+ return 0;
+ }
+ if (requiresFFmpeg_) {
+#if !RN_AUDIO_API_FFMPEG_DISABLED
+ return ffmpegDecoder_.readPcmFrames(buf, frameCount);
+#else
+ return 0;
+#endif
+ }
+ if (maDecoder_ == nullptr) {
+ return 0;
+ }
+ ma_uint64 framesRead = 0;
+ ma_decoder_read_pcm_frames(maDecoder_.get(), buf, frameCount, &framesRead);
+ return static_cast<size_t>(framesRead);
+}
+
+bool AudioFileSourceNode::seekDecoderToTime(double seconds) {
+ bool seeked = false;
+ if (requiresFFmpeg_) {
+#if !RN_AUDIO_API_FFMPEG_DISABLED
+ seeked = ffmpegDecoder_.seekToTime(seconds);
+#endif
+ } else if (maDecoder_ != nullptr && sampleRate_ > 0) {
+ const auto frame = static_cast<ma_uint64>(std::llround(seconds * sampleRate_));
+ seeked = ma_decoder_seek_to_pcm_frame(maDecoder_.get(), frame) == MA_SUCCESS;
+ }
+ return seeked;
+}
+
+void AudioFileSourceNode::applyPlaybackStateAfterSuccessfulSeek(double seconds) {
+ currentTime_.store(seconds, std::memory_order_release);
+ onPositionChangedFlush_.store(true, std::memory_order_release);
+}
+
+void AudioFileSourceNode::runOffloadedSeekTask(OffloadedSeekRequest req) {
+ if (decoderState_ == nullptr) {
+ pendingOffloadedSeeks_.fetch_sub(1, std::memory_order_acq_rel);
+ return;
+ }
+ if (seekDecoderToTime(req.seconds)) {
+ applyPlaybackStateAfterSuccessfulSeek(req.seconds);
+ }
+ pendingOffloadedSeeks_.fetch_sub(1, std::memory_order_acq_rel);
+}
+
+void AudioFileSourceNode::seekToTime(double seconds) {
+ if (decoderState_ == nullptr) {
+ return;
+ }
+ const double dur = duration_.load(std::memory_order_acquire);
+ if (dur > 0) {
+ seconds = std::clamp(seconds, 0.0, dur);
+ } else {
+ seconds = std::max(0.0, seconds);
+ }
+ pendingOffloadedSeeks_.fetch_add(1, std::memory_order_acq_rel);
+ seekOffloader_->getSender()->send(OffloadedSeekRequest{seconds});
+}
+
+void AudioFileSourceNode::writeInterleavedToBuffer(
+ const std::shared_ptr &processingBuffer,
+ const AudioFileDecoderState &state,
+ size_t destSampleOffset,
+ size_t frameCount,
+ float vol) {
+ if (vol == 0) {
+ processingBuffer->zero();
+ return;
+ }
+ auto numOutputChannels = static_cast<int>(processingBuffer->getNumberOfChannels());
+ for (size_t i = 0; i < frameCount; i++) {
+ for (int ch = 0; ch < numOutputChannels; ch++) {
+ int srcCh = ch < state.channels ? ch : state.channels - 1;
+ processingBuffer->getChannel(ch)->span()[destSampleOffset + i] =
+ vol * state.interleavedBuffer[i * state.channels + srcCh];
+ }
+ }
+}
+
+size_t AudioFileSourceNode::handleEof(
+ const std::shared_ptr &processingBuffer,
+ size_t framesToProcess,
+ size_t framesRead,
+ float vol) {
+ if (!loop_.load(std::memory_order_acquire)) {
+ return framesRead;
+ }
+
+ if (!seekDecoderToTime(0)) {
+ return framesRead;
+ }
+
+ size_t toFill = framesToProcess - framesRead;
+ if (toFill == 0) {
+ return framesRead;
+ }
+
+ auto &state = *decoderState_;
+ size_t extra = readFrames(state.interleavedBuffer.data(), toFill);
+
+ if (vol != 0) {
+ writeInterleavedToBuffer(processingBuffer, state, framesRead, extra, vol);
+ }
+
+ return framesRead + extra;
+}
+
+std::shared_ptr AudioFileSourceNode::processNode(
+ const std::shared_ptr &processingBuffer,
+ int framesToProcess) {
+ if (decoderState_ == nullptr) {
+ processingBuffer->zero();
+ return processingBuffer;
+ }
+
+ if (pendingOffloadedSeeks_.load(std::memory_order_acquire) > 0) {
+ processingBuffer->zero();
+ return processingBuffer;
+ }
+
+ if (filePaused_.load(std::memory_order_acquire)) {
+ processingBuffer->zero();
+ return processingBuffer;
+ }
+
+ auto &state = *decoderState_;
+
+ size_t framesRead = readFrames(state.interleavedBuffer.data(), framesToProcess);
+ sendOnPositionChangedEvent(static_cast<int>(framesRead));
+
+ const float vol = volume_.load(std::memory_order_acquire);
+ writeInterleavedToBuffer(processingBuffer, state, 0, framesRead, vol);
+
+ if (framesRead < framesToProcess) {
+ if (!loop_.load(std::memory_order_acquire)) {
+ sendOnEndedEvent();
+ onPositionChangedFlush_.store(true, std::memory_order_release);
+ sendOnPositionChangedEvent(static_cast<int>(framesToProcess - framesRead));
+ processingBuffer->zero(framesRead, framesToProcess - framesRead);
+ return processingBuffer;
+ }
+ size_t totalFilled = handleEof(processingBuffer, framesToProcess, framesRead, vol);
+ onPositionChangedFlush_.store(true, std::memory_order_release);
+ sendOnPositionChangedEvent(static_cast<int>(totalFilled));
+ processingBuffer->zero(totalFilled, framesToProcess - totalFilled);
+ }
+
+ return processingBuffer;
+}
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.h
new file mode 100644
index 000000000..2fa1e140b
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.h
@@ -0,0 +1,149 @@
+#pragma once
+
+#include
+#if !RN_AUDIO_API_FFMPEG_DISABLED
+#include
+#endif // RN_AUDIO_API_FFMPEG_DISABLED
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+using namespace audioapi::channels;
+
+namespace audioapi {
+
+struct AudioFileSourceOptions;
+
+struct OffloadedSeekRequest {
+ double seconds = 0;
+ OffloadedSeekRequest() = default;
+ explicit OffloadedSeekRequest(double t) : seconds(t) {}
+};
+
+struct AudioFileDecoderState {
+ std::vector<uint8_t> memoryData;
+ std::string filePath;
+ std::vector<float> interleavedBuffer;
+ int channels = 0;
+ float sampleRate = 0;
+};
+
+class AudioFileSourceNode : public AudioNode {
+ public:
+ explicit AudioFileSourceNode(
+ const std::shared_ptr &context,
+ const AudioFileSourceOptions &options);
+ ~AudioFileSourceNode() override = default;
+
+ void disable() override;
+
+ void start();
+
+ float getVolume() const {
+ return volume_.load(std::memory_order_acquire);
+ }
+
+ void setVolume(float v) {
+ volume_.store(v, std::memory_order_release);
+ }
+
+ void pause();
+
+ /// @note Audio Thread only
+ void setOnPositionChangedCallbackId(uint64_t callbackId);
+ void unregisterOnPositionChangedCallback(uint64_t callbackId);
+
+ /// @note Audio Thread only
+ void setOnEndedCallbackId(uint64_t callbackId);
+ void unregisterOnEndedCallback(uint64_t callbackId);
+
+ bool getLoop() const {
+ return loop_.load(std::memory_order_acquire);
+ }
+
+ void setLoop(bool v) {
+ loop_.store(v, std::memory_order_release);
+ }
+
+ double getDuration() const {
+ return duration_.load(std::memory_order_acquire);
+ }
+
+ double getCurrentTime() const {
+ return currentTime_.load(std::memory_order_acquire);
+ }
+
+ /// Seek to \p seconds (clamped to [0, duration]).
+ void seekToTime(double seconds);
+
+ protected:
+ std::shared_ptr processNode(
+ const std::shared_ptr &processingBuffer,
+ int framesToProcess) override;
+
+ private:
+ void initDecoders(
+ bool useFilePath,
+ const std::shared_ptr &context,
+ const std::shared_ptr &state);
+
+ std::shared_ptr<AudioFileDecoderState> decoderState_;
+ std::unique_ptr<ma_decoder> maDecoder_;
+ std::atomic<float> volume_;
+ bool requiresFFmpeg_;
+#if !RN_AUDIO_API_FFMPEG_DISABLED
+ ffmpegdecoder::FFmpegDecoder ffmpegDecoder_;
+ ffmpegdecoder::FFmpegDecoderConfig cfg;
+#endif // RN_AUDIO_API_FFMPEG_DISABLED
+ std::atomic<bool> filePaused_{false};
+ bool fileStarted_{false};
+ std::atomic<bool> loop_{false};
+ std::atomic<double> duration_{0};
+ std::atomic<double> currentTime_{0};
+ double sampleRate_{0};
+ const std::shared_ptr<AudioEventHandlerRegistry> audioEventHandlerRegistry_;
+ static constexpr double ON_POSITION_CHANGED_INTERVAL = 0.25;
+ static constexpr int SEEK_OFFLOADER_WORKER_COUNT = 16;
+
+ size_t readFrames(float *buf, size_t frameCount);
+ bool seekDecoderToTime(double seconds);
+ static void writeInterleavedToBuffer(
+ const std::shared_ptr &processingBuffer,
+ const AudioFileDecoderState &state,
+ size_t destSampleOffset,
+ size_t frameCount,
+ float vol);
+ size_t handleEof(
+ const std::shared_ptr &processingBuffer,
+ size_t framesToProcess,
+ size_t framesRead,
+ float vol);
+
+ void sendOnPositionChangedEvent(int samplesWritten);
+ void sendOnEndedEvent();
+
+ void applyPlaybackStateAfterSuccessfulSeek(double seconds);
+ void runOffloadedSeekTask(OffloadedSeekRequest req);
+
+ uint64_t onPositionChangedCallbackId_ = 0;
+ uint64_t onEndedCallbackId_ = 0;
+ std::atomic<bool> playbackFinished_{false};
+ int onPositionChangedInterval_;
+ int onPositionChangedTime_ = 0;
+ std::atomic<bool> onPositionChangedFlush_{true};
+
+ /// Pending offloaded seeks; while > 0 the audio thread must not read the decoder (outputs silence).
+ std::atomic<int> pendingOffloadedSeeks_{0};
+
+ std::unique_ptr>
+ seekOffloader_;
+};
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioDecoder.h b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioDecoder.h
index 0c44d7419..1e5451996 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioDecoder.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioDecoder.h
@@ -39,16 +39,7 @@ class AudioDecoder {
int inputChannelCount,
bool interleaved);
- private:
- static AudioBufferResult decodeWithMiniaudio(float sampleRate, DecoderSource source);
- static Result, std::string> readAllPcmFrames(
- ma_decoder &decoder,
- int outputChannels);
- static AudioBufferResult makeAudioBufferFromFloatBuffer(
- const std::vector &buffer,
- float outputSampleRate,
- int outputChannels);
- static AudioFormat detectAudioFormat(const void *data, size_t size) {
+ [[nodiscard]] static AudioFormat detectAudioFormat(const void *data, size_t size) {
if (size < 12)
return AudioFormat::UNKNOWN;
const auto *bytes = static_cast(data);
@@ -84,7 +75,8 @@ class AudioDecoder {
}
return AudioFormat::UNKNOWN;
}
- static inline bool pathHasExtension(
+
+ [[nodiscard]] static inline bool pathHasExtension(
const std::string &path,
const std::vector &extensions) {
std::string pathLower = path;
@@ -95,6 +87,16 @@ class AudioDecoder {
}
return false;
}
+
+ private:
+ static AudioBufferResult decodeWithMiniaudio(float sampleRate, DecoderSource source);
+ static Result, std::string> readAllPcmFrames(
+ ma_decoder &decoder,
+ int outputChannels);
+ static AudioBufferResult makeAudioBufferFromFloatBuffer(
+ const std::vector &buffer,
+ float outputSampleRate,
+ int outputChannels);
[[nodiscard]] static inline int16_t floatToInt16(float sample) {
return static_cast(sample * INT16_MAX);
}
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp
index 041992795..6c93cdb13 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp
@@ -10,323 +10,498 @@
#if !RN_AUDIO_API_FFMPEG_DISABLED
#include
-#endif // RN_AUDIO_API_FFMPEG_DISABLED
-#include
+
+#include
+#include
+#include
+
+extern "C" {
+#include
+#include
+#include
+}
namespace audioapi::ffmpegdecoder {
int read_packet(void *opaque, uint8_t *buf, int buf_size) {
- MemoryIOContext *ctx = static_cast(opaque);
-
+ auto *ctx = static_cast(opaque);
if (ctx->pos >= ctx->size) {
return AVERROR_EOF;
}
-
- int bytes_to_read = std::min(buf_size, static_cast(ctx->size - ctx->pos));
- memcpy(buf, ctx->data + ctx->pos, bytes_to_read);
- ctx->pos += bytes_to_read;
-
- return bytes_to_read;
+ int n = std::min(buf_size, static_cast(ctx->size - ctx->pos));
+ memcpy(buf, ctx->data + ctx->pos, n);
+ ctx->pos += static_cast(n);
+ return n;
}
int64_t seek_packet(void *opaque, int64_t offset, int whence) {
- MemoryIOContext *ctx = static_cast(opaque);
-
+ auto *ctx = static_cast(opaque);
switch (whence) {
case SEEK_SET:
- ctx->pos = offset;
+ ctx->pos = static_cast(offset);
break;
case SEEK_CUR:
- ctx->pos += offset;
+ ctx->pos += static_cast(offset);
break;
case SEEK_END:
- ctx->pos = ctx->size + offset;
+ ctx->pos = ctx->size + static_cast(offset);
break;
case AVSEEK_SIZE:
- return ctx->size;
+ return static_cast(ctx->size);
+ default:
+ return AVERROR(EINVAL);
}
+ ctx->pos = std::min(ctx->pos, ctx->size);
+ return static_cast(ctx->pos);
+}
- if (ctx->pos > ctx->size) {
- ctx->pos = ctx->size;
+int findAudioStreamIndex(AVFormatContext *fmt_ctx) {
+ for (unsigned i = 0; i < fmt_ctx->nb_streams; i++) {
+ if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
+ return static_cast(i);
+ }
}
-
- return ctx->pos;
+ return -1;
}
-void convertFrameToBuffer(
- SwrContext *swr,
- AVFrame *frame,
- int output_channel_count,
- std::vector &buffer,
- size_t &framesRead,
- uint8_t **&resampled_data,
- int &max_resampled_samples) {
- const int out_samples = swr_get_out_samples(swr, frame->nb_samples);
- if (out_samples > max_resampled_samples) {
- av_freep(&resampled_data[0]);
- av_freep(&resampled_data);
- max_resampled_samples = out_samples;
-
- if (av_samples_alloc_array_and_samples(
- &resampled_data,
- nullptr,
- output_channel_count,
- max_resampled_samples,
- AV_SAMPLE_FMT_FLT,
- 0) < 0) {
- return;
- }
+bool openCodec(AVFormatContext *fmt_ctx, int &audio_stream_index, AVCodecContext **out_codec) {
+ audio_stream_index = findAudioStreamIndex(fmt_ctx);
+ if (audio_stream_index < 0) {
+ return false;
+ }
+ AVCodecParameters *codecpar = fmt_ctx->streams[audio_stream_index]->codecpar;
+ const AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);
+ if (codec == nullptr) {
+ return false;
+ }
+ AVCodecContext *ctx = avcodec_alloc_context3(codec);
+ if (ctx == nullptr) {
+ return false;
+ }
+ if (avcodec_parameters_to_context(ctx, codecpar) < 0) {
+ avcodec_free_context(&ctx);
+ return false;
}
+ if (avcodec_open2(ctx, codec, nullptr) < 0) {
+ avcodec_free_context(&ctx);
+ return false;
+ }
+ *out_codec = ctx;
+ return true;
+}
- int converted_samples = swr_convert(
- swr,
- resampled_data,
- max_resampled_samples,
- const_cast(frame->data),
- frame->nb_samples);
+FFmpegDecoder::~FFmpegDecoder() {
+ close();
+}
- if (converted_samples > 0) {
- const size_t current_size = buffer.size();
- const size_t new_samples = static_cast(converted_samples) * output_channel_count;
- buffer.resize(current_size + new_samples);
- memcpy(buffer.data() + current_size, resampled_data[0], new_samples * sizeof(float));
- framesRead += converted_samples;
+void FFmpegDecoder::close() {
+ if (resampled_data_ != nullptr) {
+ av_freep(&resampled_data_[0]);
+ av_freep(&resampled_data_);
+ }
+ max_resampled_samples_ = 0;
+ if (swr_ != nullptr) {
+ swr_free(&swr_);
+ }
+ if (packet_ != nullptr) {
+ av_packet_free(&packet_);
+ }
+ if (frame_ != nullptr) {
+ av_frame_free(&frame_);
}
+ if (codec_ctx_ != nullptr) {
+ avcodec_free_context(&codec_ctx_);
+ }
+ if (fmt_ctx_ != nullptr) {
+ avformat_close_input(&fmt_ctx_);
+ }
+ if (avio_ctx_ != nullptr) {
+ avio_context_free(&avio_ctx_);
+ }
+ mem_io_.reset();
+ leftover_.clear();
+ leftover_offset_ = 0;
+ audio_stream_index_ = -1;
+ output_channels_ = 0;
+ output_sample_rate_ = 0;
+ total_output_frames_ = 0;
}
-std::vector readAllPcmFrames(
- AVFormatContext *fmt_ctx,
- AVCodecContext *codec_ctx,
- int out_sample_rate,
- int output_channel_count,
- int audio_stream_index,
- size_t &framesRead) {
- framesRead = 0;
- std::vector buffer;
- auto swr = std::unique_ptr>(
- swr_alloc(), [](SwrContext *ctx) { swr_free(&ctx); });
-
- if (swr == nullptr)
- return buffer;
-
- av_opt_set_chlayout(swr.get(), "in_chlayout", &codec_ctx->ch_layout, 0);
- av_opt_set_int(swr.get(), "in_sample_rate", codec_ctx->sample_rate, 0);
- av_opt_set_sample_fmt(swr.get(), "in_sample_fmt", codec_ctx->sample_fmt, 0);
-
- AVChannelLayout out_ch_layout;
- av_channel_layout_default(&out_ch_layout, output_channel_count);
- av_opt_set_chlayout(swr.get(), "out_chlayout", &out_ch_layout, 0);
- av_opt_set_int(swr.get(), "out_sample_rate", out_sample_rate, 0);
- av_opt_set_sample_fmt(swr.get(), "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
-
- if (swr_init(swr.get()) < 0) {
- av_channel_layout_uninit(&out_ch_layout);
- return buffer;
- }
-
- auto packet = std::unique_ptr>(
- av_packet_alloc(), [](AVPacket *p) { av_packet_free(&p); });
- auto frame = std::unique_ptr>(
- av_frame_alloc(), [](AVFrame *p) { av_frame_free(&p); });
-
- if (packet == nullptr || frame == nullptr) {
- av_channel_layout_uninit(&out_ch_layout);
- return buffer;
- }
-
- // Allocate buffer for resampled data
- uint8_t **resampled_data = nullptr;
- int max_resampled_samples = 4096; // Initial size
+bool FFmpegDecoder::setupSwr() {
+ swr_ = swr_alloc();
+ if (swr_ == nullptr) {
+ return false;
+ }
+ av_opt_set_chlayout(swr_, "in_chlayout", &codec_ctx_->ch_layout, 0);
+ av_opt_set_int(swr_, "in_sample_rate", codec_ctx_->sample_rate, 0);
+ av_opt_set_sample_fmt(swr_, "in_sample_fmt", codec_ctx_->sample_fmt, 0);
+
+ AVChannelLayout out_layout;
+ av_channel_layout_default(&out_layout, output_channels_);
+ av_opt_set_chlayout(swr_, "out_chlayout", &out_layout, 0);
+ av_opt_set_int(swr_, "out_sample_rate", output_sample_rate_, 0);
+ av_opt_set_sample_fmt(swr_, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
+ if (swr_init(swr_) < 0) {
+ av_channel_layout_uninit(&out_layout);
+ return false;
+ }
+ av_channel_layout_uninit(&out_layout);
+
if (av_samples_alloc_array_and_samples(
- &resampled_data,
+ &resampled_data_,
nullptr,
- output_channel_count,
- max_resampled_samples,
+ output_channels_,
+ FFmpegDecoder::CHUNK_SIZE,
AV_SAMPLE_FMT_FLT,
0) < 0) {
- av_channel_layout_uninit(&out_ch_layout);
- return buffer;
- }
-
- while (av_read_frame(fmt_ctx, packet.get()) >= 0) {
- if (packet->stream_index == audio_stream_index) {
- if (avcodec_send_packet(codec_ctx, packet.get()) == 0) {
- while (avcodec_receive_frame(codec_ctx, frame.get()) == 0) {
- convertFrameToBuffer(
- swr.get(),
- frame.get(),
- output_channel_count,
- buffer,
- framesRead,
- resampled_data,
- max_resampled_samples);
- }
- }
- }
- av_packet_unref(packet.get());
- }
-
- // Flush decoder
- avcodec_send_packet(codec_ctx, nullptr);
- while (avcodec_receive_frame(codec_ctx, frame.get()) == 0) {
- convertFrameToBuffer(
- swr.get(),
- frame.get(),
- output_channel_count,
- buffer,
- framesRead,
- resampled_data,
- max_resampled_samples);
+ return false;
}
-
- av_freep(&resampled_data[0]);
- av_freep(&resampled_data);
- av_channel_layout_uninit(&out_ch_layout);
-
- return buffer;
+ max_resampled_samples_ = FFmpegDecoder::CHUNK_SIZE;
+ return true;
}
-inline int findAudioStreamIndex(AVFormatContext *fmt_ctx) {
- for (int i = 0; i < fmt_ctx->nb_streams; i++) {
- if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
- return i;
- }
+bool FFmpegDecoder::openFile(const FFmpegDecoderConfig &cfg, const std::string &path) {
+ close();
+ if (path.empty()) {
+ return false;
}
- return -1;
+ if (avformat_open_input(&fmt_ctx_, path.c_str(), nullptr, nullptr) < 0) {
+ fmt_ctx_ = nullptr;
+ return false;
+ }
+ if (avformat_find_stream_info(fmt_ctx_, nullptr) < 0) {
+ avformat_close_input(&fmt_ctx_);
+ fmt_ctx_ = nullptr;
+ return false;
+ }
+ if (!openCodec(fmt_ctx_, audio_stream_index_, &codec_ctx_)) {
+ avformat_close_input(&fmt_ctx_);
+ fmt_ctx_ = nullptr;
+ return false;
+ }
+ output_channels_ = codec_ctx_->ch_layout.nb_channels;
+ output_sample_rate_ =
+ (cfg.outputSampleRate > 0) ? cfg.outputSampleRate : codec_ctx_->sample_rate;
+
+ packet_ = av_packet_alloc();
+ frame_ = av_frame_alloc();
+ if (packet_ == nullptr || frame_ == nullptr || !setupSwr()) {
+ close();
+ return false;
+ }
+ total_output_frames_ = 0;
+ return true;
}
-bool setupDecoderContext(
- AVFormatContext *fmt_ctx,
- int &audio_stream_index,
- std::unique_ptr> &codec_ctx) {
- audio_stream_index = findAudioStreamIndex(fmt_ctx);
- if (audio_stream_index == -1) {
+bool FFmpegDecoder::openMemory(const FFmpegDecoderConfig &cfg, const void *data, size_t size) {
+ close();
+ if (data == nullptr || size == 0) {
return false;
}
-
- AVCodecParameters *codecpar = fmt_ctx->streams[audio_stream_index]->codecpar;
- const AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);
- if (codec == nullptr) {
+ mem_io_ = std::make_unique();
+ mem_io_->data = static_cast(data);
+ mem_io_->size = size;
+ mem_io_->pos = 0;
+
+ auto* io_buf = static_cast(av_malloc(FFmpegDecoder::CHUNK_SIZE));
+ if (io_buf == nullptr) {
+ close();
+ return false;
+ }
+ avio_ctx_ = avio_alloc_context(
+ io_buf,
+ static_cast(FFmpegDecoder::CHUNK_SIZE),
+ 0,
+ mem_io_.get(),
+ read_packet,
+ nullptr,
+ seek_packet);
+ if (avio_ctx_ == nullptr) {
+ av_free(io_buf);
+ mem_io_.reset();
return false;
}
- AVCodecContext *raw_codec_ctx = avcodec_alloc_context3(codec);
- if (raw_codec_ctx == nullptr) {
+ fmt_ctx_ = avformat_alloc_context();
+ if (fmt_ctx_ == nullptr) {
+ close();
return false;
}
+ fmt_ctx_->pb = avio_ctx_;
- codec_ctx.reset(raw_codec_ctx);
- if (avcodec_parameters_to_context(codec_ctx.get(), codecpar) < 0) {
+ if (avformat_open_input(&fmt_ctx_, nullptr, nullptr, nullptr) < 0) {
+ close();
return false;
}
- if (avcodec_open2(codec_ctx.get(), codec, nullptr) < 0) {
+ if (avformat_find_stream_info(fmt_ctx_, nullptr) < 0) {
+ close();
return false;
}
-
+ if (!openCodec(fmt_ctx_, audio_stream_index_, &codec_ctx_)) {
+ close();
+ return false;
+ }
+ output_channels_ = codec_ctx_->ch_layout.nb_channels;
+ output_sample_rate_ =
+ (cfg.outputSampleRate > 0) ? cfg.outputSampleRate : codec_ctx_->sample_rate;
+
+ packet_ = av_packet_alloc();
+ frame_ = av_frame_alloc();
+ if (packet_ == nullptr || frame_ == nullptr || !setupSwr()) {
+ close();
+ return false;
+ }
+ total_output_frames_ = 0;
return true;
}
-std::shared_ptr decodeAudioFrames(
- AVFormatContext *fmt_ctx,
- AVCodecContext *codec_ctx,
- int audio_stream_index,
- int sample_rate) {
- size_t framesRead = 0;
- int output_sample_rate = (sample_rate > 0) ? sample_rate : codec_ctx->sample_rate;
- int output_channel_count = codec_ctx->ch_layout.nb_channels;
-
- std::vector decoded_buffer = readAllPcmFrames(
- fmt_ctx, codec_ctx, output_sample_rate, output_channel_count, audio_stream_index, framesRead);
-
- if (framesRead == 0 || decoded_buffer.empty()) {
- return nullptr;
+void FFmpegDecoder::appendFrameResampled(AVFrame *frame) {
+ int out_samples = swr_get_out_samples(swr_, frame->nb_samples);
+ if (out_samples > max_resampled_samples_) {
+ av_freep(&resampled_data_[0]);
+ av_freep(&resampled_data_);
+ max_resampled_samples_ = out_samples;
+ if (av_samples_alloc_array_and_samples(
+ &resampled_data_,
+ nullptr,
+ output_channels_,
+ max_resampled_samples_,
+ AV_SAMPLE_FMT_FLT,
+ 0) < 0) {
+ return;
+ }
+ }
+ int converted = swr_convert(
+ swr_,
+ resampled_data_,
+ max_resampled_samples_,
+ const_cast(frame->data),
+ frame->nb_samples);
+ if (converted > 0) {
+ size_t n = static_cast(converted) * static_cast(output_channels_);
+ const float *src = reinterpret_cast(resampled_data_[0]);
+ leftover_.insert(leftover_.end(), src, src + n);
}
+}
- auto outputFrames = decoded_buffer.size() / output_channel_count;
- auto audioBuffer =
- std::make_shared(outputFrames, output_channel_count, output_sample_rate);
+bool FFmpegDecoder::feedPipeline() {
+ for (;;) {
+ int r = avcodec_receive_frame(codec_ctx_, frame_);
+ if (r == 0) {
+ appendFrameResampled(frame_);
+ return true;
+ }
+ if (r == AVERROR_EOF) {
+ return !leftover_.empty();
+ }
+ if (r != AVERROR(EAGAIN)) {
+ return false;
+ }
- for (size_t ch = 0; ch < output_channel_count; ++ch) {
- auto channelData = audioBuffer->getChannel(ch)->span();
- for (int i = 0; i < outputFrames; ++i) {
- channelData[i] = decoded_buffer[i * output_channel_count + ch];
+ r = av_read_frame(fmt_ctx_, packet_);
+ if (r == AVERROR_EOF) {
+ if (avcodec_send_packet(codec_ctx_, nullptr) < 0) {
+ return false;
+ }
+ continue;
+ }
+ if (r < 0) {
+ return false;
+ }
+ if (packet_->stream_index != audio_stream_index_) {
+ av_packet_unref(packet_);
+ continue;
+ }
+ r = avcodec_send_packet(codec_ctx_, packet_);
+ av_packet_unref(packet_);
+ if (r < 0) {
+ return false;
}
}
- return audioBuffer;
}
-std::shared_ptr decodeWithMemoryBlock(const void *data, size_t size, int sample_rate) {
- if (data == nullptr || size == 0) {
- return nullptr;
+float FFmpegDecoder::getDurationInSeconds() const {
+ if (!isOpen() || fmt_ctx_ == nullptr || audio_stream_index_ < 0) {
+ return 0;
}
-
- MemoryIOContext io_ctx{static_cast(data), size, 0};
-
- constexpr size_t buffer_size = 4096;
- uint8_t *io_buffer = static_cast(av_malloc(buffer_size));
- if (io_buffer == nullptr) {
- return nullptr;
+ if (fmt_ctx_->duration != AV_NOPTS_VALUE && fmt_ctx_->duration >= 0) {
+ double t =
+ static_cast(fmt_ctx_->duration) / static_cast(AV_TIME_BASE);
+ if (t > 0 && std::isfinite(t)) {
+ return static_cast(t);
+ }
}
+ return 0;
+}
- auto avio_ctx = std::unique_ptr>(
- avio_alloc_context(io_buffer, buffer_size, 0, &io_ctx, read_packet, nullptr, seek_packet),
- [](AVIOContext *ctx) { avio_context_free(&ctx); });
- if (avio_ctx == nullptr) {
- return nullptr;
+float FFmpegDecoder::getCurrentPositionInSeconds() const {
+ if (!isOpen() || output_sample_rate_ <= 0) {
+ return 0;
}
+ return static_cast(total_output_frames_) / static_cast(output_sample_rate_);
+}
- AVFormatContext *raw_fmt_ctx = avformat_alloc_context();
- if (raw_fmt_ctx == nullptr) {
- return nullptr;
+// TODO: offload this call to a separate thread, because seeking the decoder can take a while.
+// The current implementation suspends the audio thread, which disables multiple playbacks.
+bool FFmpegDecoder::seekToTime(double seconds) {
+ if (!isOpen() || audio_stream_index_ < 0 || output_sample_rate_ <= 0) {
+ return false;
}
- raw_fmt_ctx->pb = avio_ctx.get();
-
- if (avformat_open_input(&raw_fmt_ctx, nullptr, nullptr, nullptr) < 0) {
- avformat_free_context(raw_fmt_ctx);
- return nullptr;
+ float dur = getDurationInSeconds();
+ if (dur > 0 && std::isfinite(dur)) {
+ seconds = std::clamp(seconds, 0.0, static_cast(dur));
+ } else {
+ seconds = std::max(0.0, seconds);
+ if (!std::isfinite(seconds)) {
+ return false;
+ }
}
- auto fmt_ctx = std::unique_ptr(
- raw_fmt_ctx, &avformat_free_context);
+ auto ts = static_cast(seconds * static_cast(AV_TIME_BASE));
+ if (avformat_seek_file(fmt_ctx_, -1, INT64_MIN, ts, INT64_MAX, 0) < 0) {
+ return false;
+ }
+ avcodec_flush_buffers(codec_ctx_);
+ leftover_.clear();
+ leftover_offset_ = 0;
+ total_output_frames_ = static_cast(
+ std::llround(seconds * static_cast(output_sample_rate_)));
+ return true;
+}
- if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) {
- return nullptr;
+size_t FFmpegDecoder::readPcmFrames(float *outInterleaved, size_t frameCount) {
+ if (!isOpen() || outInterleaved == nullptr || frameCount == 0 || output_channels_ <= 0) {
+ return 0;
+ }
+ size_t delivered = 0;
+ const auto ch = static_cast(output_channels_);
+
+ while (delivered < frameCount) {
+ size_t need = frameCount - delivered;
+ size_t available_samples = leftover_.size() > leftover_offset_
+ ? leftover_.size() - leftover_offset_
+ : 0;
+ size_t leftover_frames = available_samples / ch;
+ if (leftover_frames > 0) {
+ size_t take = std::min(need, leftover_frames);
+ size_t samples = take * ch;
+ memcpy(
+ outInterleaved + delivered * ch,
+ leftover_.data() + leftover_offset_,
+ samples * sizeof(float));
+ leftover_offset_ += samples;
+ if (leftover_offset_ >= leftover_.size()) {
+ leftover_.clear();
+ leftover_offset_ = 0;
+ }
+ delivered += take;
+ } else if (!feedPipeline()) {
+ break;
+ }
}
+ total_output_frames_ += delivered;
+ return delivered;
+}
- auto codec_ctx = std::unique_ptr>(
- nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); });
- int audio_stream_index = -1;
- if (!setupDecoderContext(fmt_ctx.get(), audio_stream_index, codec_ctx)) {
+static std::shared_ptr buildAudioBufferFromInterleaved(
+ std::vector &interleaved,
+ int channels,
+ int sample_rate) {
+ if (interleaved.empty() || channels <= 0) {
return nullptr;
}
-
- return decodeAudioFrames(fmt_ctx.get(), codec_ctx.get(), audio_stream_index, sample_rate);
+ size_t frames = interleaved.size() / static_cast(channels);
+ auto buf = std::make_shared(frames, channels, static_cast(sample_rate));
+ for (int c = 0; c < channels; ++c) {
+ auto span = buf->getChannel(c)->span();
+ for (size_t i = 0; i < frames; ++i) {
+ span[i] = interleaved[i * static_cast(channels) + static_cast(c)];
+ }
+ }
+ return buf;
}
std::shared_ptr decodeWithFilePath(const std::string &path, int sample_rate) {
- if (path.empty()) {
+ FFmpegDecoderConfig cfg;
+ ffmpegDecoderConfigInit(&cfg, sample_rate);
+ FFmpegDecoder dec;
+ if (!dec.openFile(cfg, path)) {
return nullptr;
}
+ std::vector acc;
+ std::vector tmp(FFmpegDecoder::CHUNK_SIZE * static_cast(std::max(1, dec.outputChannels())));
+ while (true) {
+ size_t n = dec.readPcmFrames(tmp.data(), FFmpegDecoder::CHUNK_SIZE);
+ if (n == 0) {
+ break;
+ }
+ acc.insert(
+ acc.end(),
+ tmp.begin(),
+ tmp.begin() + static_cast(n * static_cast(dec.outputChannels())));
+ }
+ return buildAudioBufferFromInterleaved(acc, dec.outputChannels(), dec.outputSampleRate());
+}
- AVFormatContext *raw_fmt_ctx = nullptr;
- if (avformat_open_input(&raw_fmt_ctx, path.c_str(), nullptr, nullptr) < 0)
+std::shared_ptr decodeWithMemoryBlock(const void *data, size_t size, int sample_rate) {
+ FFmpegDecoderConfig cfg;
+ ffmpegDecoderConfigInit(&cfg, sample_rate);
+ FFmpegDecoder dec;
+ if (!dec.openMemory(cfg, data, size)) {
return nullptr;
+ }
+ std::vector acc;
+ std::vector tmp(FFmpegDecoder::CHUNK_SIZE * static_cast(std::max(1, dec.outputChannels())));
+ while (true) {
+ size_t n = dec.readPcmFrames(tmp.data(), FFmpegDecoder::CHUNK_SIZE);
+ if (n == 0) {
+ break;
+ }
+ acc.insert(
+ acc.end(),
+ tmp.begin(),
+ tmp.begin() + static_cast(n * static_cast(dec.outputChannels())));
+ }
+ return buildAudioBufferFromInterleaved(acc, dec.outputChannels(), dec.outputSampleRate());
+}
- auto fmt_ctx = std::unique_ptr>(
- raw_fmt_ctx, [](AVFormatContext *ctx) { avformat_close_input(&ctx); });
+} // namespace audioapi::ffmpegdecoder
- if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) {
- return nullptr;
- }
+#else
- auto codec_ctx = std::unique_ptr>(
- nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); });
- int audio_stream_index = -1;
- if (!setupDecoderContext(fmt_ctx.get(), audio_stream_index, codec_ctx)) {
- return nullptr;
- }
+#include
- return decodeAudioFrames(fmt_ctx.get(), codec_ctx.get(), audio_stream_index, sample_rate);
+namespace audioapi::ffmpegdecoder {
+
+// Move construction/assignment are declared `= delete` in the header, so no
+// definitions are provided here (defining an explicitly-deleted function is ill-formed).
+FFmpegDecoder::~FFmpegDecoder() = default;
+void FFmpegDecoder::close() {}
+bool FFmpegDecoder::openFile(const FFmpegDecoderConfig &, const std::string &) {
+ return false;
+}
+bool FFmpegDecoder::openMemory(const FFmpegDecoderConfig &, const void *, size_t) {
+ return false;
+}
+float FFmpegDecoder::getDurationInSeconds() const {
+ return 0;
+}
+float FFmpegDecoder::getCurrentPositionInSeconds() const {
+ return 0;
+}
+bool FFmpegDecoder::seekToTime(double) {
+ return false;
+}
+size_t FFmpegDecoder::readPcmFrames(float *, size_t) {
+ return 0;
+}
+std::shared_ptr decodeWithFilePath(const std::string &, int) {
+ return nullptr;
+}
+std::shared_ptr decodeWithMemoryBlock(const void *, size_t, int) {
+ return nullptr;
}
} // namespace audioapi::ffmpegdecoder
+
+#endif // !RN_AUDIO_API_FFMPEG_DISABLED
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h
index 46eea8f1d..fb291d55e 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h
@@ -8,8 +8,12 @@
* FFmpeg, you must comply with the terms of the LGPL for FFmpeg itself.
*/
+#pragma once
+
#include
+#include
#include
+#include
#include
extern "C" {
@@ -18,53 +22,116 @@ extern "C" {
#include
#include
}
+
class AudioBuffer;
namespace audioapi::ffmpegdecoder {
-// Custom IO context for reading from memory
+
+/// Opaque IO state for openMemory (must outlive decode until close).
struct MemoryIOContext {
- const uint8_t *data;
- size_t size;
- size_t pos;
+ const uint8_t *data = nullptr;
+ size_t size = 0;
+ size_t pos = 0;
};
-struct AudioStreamContext {
- AVFormatContext *fmt_ctx = nullptr;
- AVCodecContext *codec_ctx = nullptr;
- int audio_stream_index = -1;
+/// Step 1 — like ma_decoder_config_init: desired output sample rate (0 = use stream rate).
+struct FFmpegDecoderConfig {
+ int outputSampleRate = 0;
};
-int read_packet(void *opaque, uint8_t *buf, int buf_size);
-int64_t seek_packet(void *opaque, int64_t offset, int whence);
-inline int findAudioStreamIndex(AVFormatContext *fmt_ctx);
-std::vector readAllPcmFrames(
- AVFormatContext *fmt_ctx,
- AVCodecContext *codec_ctx,
- int out_sample_rate,
- int output_channel_count,
- int audio_stream_index,
- size_t &framesRead);
-
-void convertFrameToBuffer(
- SwrContext *swr,
- AVFrame *frame,
- int output_channel_count,
- std::vector &buffer,
- size_t &framesRead,
- uint8_t **&resampled_data,
- int &max_resampled_samples);
-bool setupDecoderContext(
- AVFormatContext *fmt_ctx,
- int &audio_stream_index,
- std::unique_ptr &codec_ctx);
-std::shared_ptr decodeAudioFrames(
- AVFormatContext *fmt_ctx,
- AVCodecContext *codec_ctx,
- int audio_stream_index,
- int sample_rate);
+/// Initialize decoder config (mirrors miniaudio-style config step).
+inline void ffmpegDecoderConfigInit(FFmpegDecoderConfig *cfg, int outputSampleRate) {
+ if (cfg != nullptr) {
+ cfg->outputSampleRate = outputSampleRate;
+ }
+}
-std::shared_ptr decodeWithMemoryBlock(const void *data, size_t size, int sample_rate);
+/**
+ * FFmpeg decoder with incremental read, analogous to ma_decoder:
+ * 1) ffmpegDecoderConfigInit
+ * 2) openFile or openMemory
+ * 3) readPcmFrames repeatedly; 0 returned = end of stream
+ * 4) close when done
+ *
+ * For openMemory, \p data must remain valid until close().
+ */
+class FFmpegDecoder {
+ public:
+ FFmpegDecoder() = default;
+ FFmpegDecoder(const FFmpegDecoder &) = delete;
+ FFmpegDecoder &operator=(const FFmpegDecoder &) = delete;
+ FFmpegDecoder(FFmpegDecoder &&other) = delete;
+ FFmpegDecoder &operator=(FFmpegDecoder &&other) = delete;
+ ~FFmpegDecoder();
+
+ /// @brief Opens a file for decoding.
+ /// @param cfg The configuration for the decoder.
+ /// @param path The path to the file.
+ /// @return True if the file was opened successfully, false otherwise.
+ [[nodiscard]] bool openFile(const FFmpegDecoderConfig &cfg, const std::string &path);
+
+ /// @brief Opens a memory block for decoding.
+ /// @param cfg The configuration for the decoder.
+ /// @param data The data to decode.
+ /// @param size The size of the data.
+ /// @return True if the memory block was opened successfully, false otherwise.
+ [[nodiscard]] bool openMemory(const FFmpegDecoderConfig &cfg, const void *data, size_t size);
+
+ /// @brief Reads frames from the decoder.
+ /// @param outInterleaved The output buffer for the frames.
+ /// @param frameCount The maximum number of frames to read.
+ /// @return The number of frames actually read (0 = EOF).
+ [[nodiscard]] size_t readPcmFrames(float *outInterleaved, size_t frameCount);
+
+ /// @brief Closes the decoder.
+ void close();
+
+ /// @brief Checks if the decoder is open.
+ /// @return True if the decoder is open, false otherwise.
+ [[nodiscard]] bool isOpen() const { return fmt_ctx_ != nullptr && codec_ctx_ != nullptr; }
+ [[nodiscard]] int outputChannels() const { return output_channels_; }
+ [[nodiscard]] int outputSampleRate() const { return output_sample_rate_; }
+ /// @brief Duration in seconds. Returns 0 if unknown.
+ [[nodiscard]] float getDurationInSeconds() const;
+
+ /// @brief Current playback position in seconds (frames read / sample rate).
+ [[nodiscard]] float getCurrentPositionInSeconds() const;
+
+ /// @brief Seeks to a playback position in seconds (output / resampled timeline).
+ /// @return True if seek succeeded.
+ [[nodiscard]] bool seekToTime(double seconds);
+
+ static constexpr size_t CHUNK_SIZE = 4096;
+
+ private:
+ bool setupSwr();
+ bool feedPipeline();
+ void appendFrameResampled(AVFrame *frame);
+
+ AVFormatContext *fmt_ctx_ = nullptr;
+ AVCodecContext *codec_ctx_ = nullptr;
+ SwrContext *swr_ = nullptr;
+ AVPacket *packet_ = nullptr;
+ AVFrame *frame_ = nullptr;
+
+ uint8_t **resampled_data_ = nullptr;
+ int max_resampled_samples_ = 0;
+
+ std::unique_ptr mem_io_;
+ AVIOContext *avio_ctx_ = nullptr;
+
+ std::vector leftover_;
+ size_t leftover_offset_ = 0;
+ int audio_stream_index_ = -1;
+ int output_channels_ = 0;
+ int output_sample_rate_ = 0;
+ size_t total_output_frames_ = 0;
+};
+
+// --- One-shot decode (existing API) ----------------------------------------
+
+std::shared_ptr decodeWithMemoryBlock(const void *data, size_t size, int sample_rate);
std::shared_ptr decodeWithFilePath(const std::string &path, int sample_rate);
} // namespace audioapi::ffmpegdecoder
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h b/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h
index b2df4a479..0174c1434 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h
@@ -111,6 +111,11 @@ struct AudioBufferSourceOptions : BaseAudioBufferSourceOptions {
}
};
+struct AudioFileSourceOptions : AudioScheduledSourceNodeOptions {
+ std::vector data;
+ std::string filePath;
+};
+
struct StreamerOptions : AudioScheduledSourceNodeOptions {
std::string streamPath;
};
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/TaskOffloader.hpp b/packages/react-native-audio-api/common/cpp/audioapi/utils/TaskOffloader.hpp
index ffec856be..85de638a7 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/utils/TaskOffloader.hpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/TaskOffloader.hpp
@@ -56,7 +56,6 @@ class TaskOffloader {
Sender sender_;
std::thread workerThread_;
std::atomic shouldRun_;
- bool taskOffloaded_;
};
} // namespace audioapi::task_offloader
diff --git a/packages/react-native-audio-api/common/cpp/cursor/CMakeLists.txt b/packages/react-native-audio-api/common/cpp/cursor/CMakeLists.txt
index bd9378edc..f8a3da551 100644
--- a/packages/react-native-audio-api/common/cpp/cursor/CMakeLists.txt
+++ b/packages/react-native-audio-api/common/cpp/cursor/CMakeLists.txt
@@ -16,7 +16,7 @@ list(REMOVE_ITEM COMMON_CPP_SOURCES
)
set(INCLUDE_DIR ${COMMON_CPP_DIR}/audioapi/external/include)
-set(FFMPEG_INCLUDE_DIR ${COMMON_CPP_DIR}/audioapi/external/ffmpeg_include)
+set(FFMPEG_INCLUDE_DIR ${COMMON_CPP_DIR}/audioapi/external/include_ffmpeg)
set(EXTERNAL_DIR ${COMMON_CPP_DIR}/audioapi/external)
set(JNI_LIBS_DIR ${COMMON_CPP_DIR}/../../android/src/main/jniLibs)
diff --git a/packages/react-native-audio-api/common/cpp/test/CMakeLists.txt b/packages/react-native-audio-api/common/cpp/test/CMakeLists.txt
index b363b9631..a010be6a5 100644
--- a/packages/react-native-audio-api/common/cpp/test/CMakeLists.txt
+++ b/packages/react-native-audio-api/common/cpp/test/CMakeLists.txt
@@ -38,6 +38,7 @@ list(FILTER RNAUDIOAPI_SRC EXCLUDE REGEX ".*/Worklet.*Node\\.cpp$")
list(REMOVE_ITEM RNAUDIOAPI_SRC
"${REACT_NATIVE_AUDIO_API_DIR}/common/cpp/audioapi/core/AudioContext.cpp"
"${REACT_NATIVE_AUDIO_API_DIR}/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp"
+ "${REACT_NATIVE_AUDIO_API_DIR}/common/cpp/audioapi/core/sources/AudioFileSourceNode.cpp"
)
file(GLOB_RECURSE RNAUDIOAPI_LIBS
diff --git a/packages/react-native-audio-api/src/development/react/Audio/Audio.tsx b/packages/react-native-audio-api/src/development/react/Audio/Audio.tsx
index 0a95bf409..4c7020f55 100644
--- a/packages/react-native-audio-api/src/development/react/Audio/Audio.tsx
+++ b/packages/react-native-audio-api/src/development/react/Audio/Audio.tsx
@@ -1,9 +1,25 @@
-import React from 'react';
+import React, {
+ useCallback,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from 'react';
+import { View } from 'react-native';
-import type { AudioProps } from './types';
+import type {
+ AudioProps,
+ AudioTagPlaybackState,
+ AudioURISource,
+} from './types';
import { useStableAudioProps } from './utils';
+import { AudioComponentContext } from './AudioTagContext';
+import AudioControls from './AudioControls';
+import { AudioFileSourceNode } from './AudioFileSourceNode';
const Audio: React.FC = (inProps) => {
+ const { children } = inProps;
+
/* eslint-disable @typescript-eslint/no-unused-vars */
const {
autoPlay,
@@ -15,10 +31,212 @@ const Audio: React.FC = (inProps) => {
playbackRate,
preservesPitch,
volume,
+ context,
} = useStableAudioProps(inProps);
- /* eslint-enable @typescript-eslint/no-unused-vars */
- return null;
+ const path = useMemo(() => {
+ if (typeof source === 'string') {
+ return source;
+ }
+ return (source as AudioURISource).uri ?? '';
+ }, [source]);
+
+ const fileSourceHandleRef = useRef(new AudioFileSourceNode());
+ const loadedSourceRef = useRef(null);
+ const loopRef = useRef(loop);
+ const effectiveVolumeRef = useRef(muted ? 0 : volume);
+ const contextRef = useRef(context);
+
+ const handlePlaybackEndedRef = useRef<() => void>(() => {});
+
+ const [volumeState, setVolumeState] = useState(volume);
+ const [mutedState, setMutedState] = useState(muted);
+ const [isReady, setIsReady] = useState(false);
+ const [playbackState, setPlaybackState] =
+ useState('idle');
+ const [currentTime, setCurrentTime] = useState(0);
+ const [duration, setDuration] = useState(0);
+
+ useEffect(() => {
+ setVolumeState(volume);
+ }, [volume]);
+
+ useEffect(() => {
+ setMutedState(muted);
+ }, [muted]);
+
+ useEffect(() => {
+ effectiveVolumeRef.current = mutedState ? 0 : volumeState;
+ }, [mutedState, volumeState]);
+
+ handlePlaybackEndedRef.current = () => {
+ setPlaybackState('idle');
+ const ctx = contextRef.current;
+ const src = loadedSourceRef.current;
+ if (!ctx || !src) {
+ return;
+ }
+ const handle = fileSourceHandleRef.current;
+ const { duration: d } = handle.attach(ctx.context.createFileSource(src), {
+ loop: loopRef.current,
+ onEnded: () => handlePlaybackEndedRef.current(),
+ });
+ setDuration(d);
+ handle.setVolume(effectiveVolumeRef.current);
+ handle.setLoop(loopRef.current);
+ };
+
+ const play = useCallback(() => {
+ if (!context) {
+ return;
+ }
+ fileSourceHandleRef.current.play(context);
+ setPlaybackState('playing');
+ }, [context]);
+
+ const pause = useCallback(() => {
+ fileSourceHandleRef.current.pause();
+ setPlaybackState('paused');
+ }, []);
+
+ const seekToTime = useCallback(
+ (seconds: number) => {
+ fileSourceHandleRef.current.seekToTime(seconds);
+ const d = duration;
+ const t =
+ d > 0 ? Math.max(0, Math.min(seconds, d)) : Math.max(0, seconds);
+ setCurrentTime(t);
+ },
+ [duration]
+ );
+
+ const attachNode = useCallback(
+ (sourceArg: ArrayBuffer | string) => {
+ if (!context) {
+ return;
+ }
+ loadedSourceRef.current = sourceArg;
+ const handle = fileSourceHandleRef.current;
+ const { duration: d } = handle.attach(
+ context.context.createFileSource(sourceArg),
+ {
+ loop: loopRef.current,
+ onEnded: () => {
+ handlePlaybackEndedRef.current();
+ setCurrentTime(d);
+ },
+ }
+ );
+ setDuration(d);
+ setIsReady(true);
+ handle.setVolume(effectiveVolumeRef.current);
+ handle.setLoop(loopRef.current);
+ if (autoPlay) {
+ play();
+ }
+ },
+ [autoPlay, play, context]
+ );
+
+ useEffect(() => {
+ if (!context || !path) {
+ return;
+ }
+
+ const fileSourceHandle = fileSourceHandleRef.current;
+
+ const run = async () => {
+ if (path.startsWith('http')) {
+ const response = await fetch(path);
+ const arrayBuffer = await response.arrayBuffer();
+ attachNode(arrayBuffer);
+ } else {
+ attachNode(path);
+ }
+ };
+
+ setIsReady(false);
+ setPlaybackState('idle');
+ run();
+
+ return () => {
+ fileSourceHandle.dispose();
+ loadedSourceRef.current = null;
+ setIsReady(false);
+ setPlaybackState('idle');
+ };
+ }, [path, context, attachNode]);
+
+ useEffect(() => {
+ fileSourceHandleRef.current.setVolume(mutedState ? 0 : volumeState);
+ }, [volumeState, mutedState]);
+
+ useEffect(() => {
+ fileSourceHandleRef.current.setLoop(loop);
+ }, [loop]);
+
+ useEffect(() => {
+ if (playbackState !== 'playing') {
+ return;
+ }
+
+ const handle = fileSourceHandleRef.current;
+ handle.startPositionTracking(setCurrentTime);
+
+ return () => {
+ handle.stopPositionTracking();
+ };
+ }, [playbackState]);
+
+ const setVolume = useCallback((next: number) => {
+ setVolumeState(next);
+ }, []);
+
+ const setMuted = useCallback((next: boolean) => {
+ setMutedState(next);
+ }, []);
+
+ const ctxValue = useMemo(
+ () => ({
+ play,
+ pause,
+ seekToTime,
+ volume: volumeState,
+ setVolume,
+ muted: mutedState,
+ setMuted,
+ isReady,
+ playbackState,
+ currentTime,
+ duration,
+ }),
+ [
+ play,
+ pause,
+ seekToTime,
+ setVolume,
+ volumeState,
+ mutedState,
+ setMuted,
+ isReady,
+ playbackState,
+ currentTime,
+ duration,
+ ]
+ );
+
+ if (context == null) {
+ return null;
+ }
+
+ return (
+
+
+ {controls && }
+ {children}
+
+
+ );
};
export default Audio;
diff --git a/packages/react-native-audio-api/src/development/react/Audio/Audio.web.tsx b/packages/react-native-audio-api/src/development/react/Audio/Audio.web.tsx
index be6453af0..a718f8fcd 100644
--- a/packages/react-native-audio-api/src/development/react/Audio/Audio.web.tsx
+++ b/packages/react-native-audio-api/src/development/react/Audio/Audio.web.tsx
@@ -1,9 +1,175 @@
-import React from 'react';
-import type { AudioProps } from './types';
+import React, {
+ useCallback,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from 'react';
+import type { AudioProps, AudioTagPlaybackState } from './types';
+import { AudioComponentContext } from './AudioTagContext';
+import { useStableAudioProps } from './utils';
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const Audio: React.FC = (props) => {
- return null;
+ const { children } = props;
+ /* eslint-disable @typescript-eslint/no-unused-vars */
+ const {
+ autoPlay,
+ controls,
+ loop,
+ muted,
+ preload,
+ source,
+ playbackRate,
+ preservesPitch,
+ volume,
+ } = useStableAudioProps(props);
+
+ const audioRef = useRef(null);
+ const [volumeState, setVolumeState] = useState(volume);
+ const [mutedState, setMutedState] = useState(muted);
+ const [isReady, setIsReady] = useState(false);
+ const [playbackState, setPlaybackState] =
+ useState('idle');
+ const [currentTime, setCurrentTime] = useState(0);
+ const [duration, setDuration] = useState(0);
+
+ useEffect(() => {
+ setVolumeState(volume);
+ }, [volume]);
+
+ useEffect(() => {
+ setMutedState(muted);
+ }, [muted]);
+
+ useEffect(() => {
+ const el = audioRef.current;
+ if (el) {
+ el.volume = volumeState;
+ }
+ }, [volumeState]);
+
+ useEffect(() => {
+ const el = audioRef.current;
+ if (!el) return;
+ const onLoadedMetadata = () => {
+ setDuration(el.duration);
+ setCurrentTime(el.currentTime);
+ };
+ const onTimeUpdate = () => setCurrentTime(el.currentTime);
+ el.addEventListener('loadedmetadata', onLoadedMetadata);
+ el.addEventListener('timeupdate', onTimeUpdate);
+ if (!isNaN(el.duration)) setDuration(el.duration);
+ setCurrentTime(el.currentTime);
+ return () => {
+ el.removeEventListener('loadedmetadata', onLoadedMetadata);
+ el.removeEventListener('timeupdate', onTimeUpdate);
+ };
+ }, [isReady]);
+
+ useEffect(() => {
+ const el = audioRef.current;
+ if (el) {
+ el.muted = mutedState;
+ }
+ }, [mutedState]);
+
+ const play = useCallback(() => {
+ audioRef.current?.play()?.catch(() => {});
+ }, []);
+
+ const pause = useCallback(() => {
+ audioRef.current?.pause();
+ }, []);
+
+ const seekToTime = useCallback(
+ (seconds: number) => {
+ const el = audioRef.current;
+ if (!el) {
+ return;
+ }
+ const d = duration;
+ const t =
+ d > 0 && Number.isFinite(d)
+ ? Math.max(0, Math.min(seconds, d))
+ : Math.max(0, seconds);
+ if (Number.isFinite(t)) {
+ el.currentTime = t;
+ setCurrentTime(t);
+ }
+ },
+ [duration]
+ );
+
+ const rewind = useCallback(() => {
+ seekToTime(0);
+ }, [seekToTime]);
+
+ const setVolume = useCallback((next: number) => {
+ setVolumeState(next);
+ const el = audioRef.current;
+ if (el) {
+ el.volume = next;
+ }
+ }, []);
+
+ const setMuted = useCallback((next: boolean) => {
+ setMutedState(next);
+ const el = audioRef.current;
+ if (el) {
+ el.muted = next;
+ }
+ }, []);
+
+ const ctxValue = useMemo(
+ () => ({
+ play,
+ pause,
+ rewind,
+ seekToTime,
+ volume: volumeState,
+ setVolume,
+ muted: mutedState,
+ setMuted,
+ isReady,
+ playbackState,
+ currentTime,
+ duration,
+ }),
+ [
+ play,
+ pause,
+ rewind,
+ seekToTime,
+ setVolume,
+ volumeState,
+ mutedState,
+ setMuted,
+ isReady,
+ playbackState,
+ currentTime,
+ duration,
+ ]
+ );
+
+ return (
+
+
+ );
};
export default Audio;
diff --git a/packages/react-native-audio-api/src/development/react/Audio/AudioControls.tsx b/packages/react-native-audio-api/src/development/react/Audio/AudioControls.tsx
new file mode 100644
index 000000000..2427fafa1
--- /dev/null
+++ b/packages/react-native-audio-api/src/development/react/Audio/AudioControls.tsx
@@ -0,0 +1,346 @@
+import { Pause, Play, Volume, VolumeX } from 'lucide-react-native';
+import React, { useCallback, useMemo, useRef, useState } from 'react';
+import {
+ ActivityIndicator,
+ LayoutChangeEvent,
+ Platform,
+ Pressable,
+ StyleSheet,
+ Text,
+ View,
+} from 'react-native';
+import { Gesture, GestureDetector } from 'react-native-gesture-handler';
+import { scheduleOnRN } from 'react-native-worklets';
+import Animated, {
+ useAnimatedStyle,
+ useSharedValue,
+ withTiming,
+} from 'react-native-reanimated';
+import { useAudioTagContext } from './AudioTagContext';
+
+function formatTime(seconds: number): string {
+ if (!Number.isFinite(seconds) || seconds < 0) return '0:00';
+ const h = Math.floor(seconds / 3600);
+ const m = Math.floor((seconds % 3600) / 60);
+ const s = Math.floor(seconds % 60);
+ if (h > 0) {
+ return `${h}:${m.toString().padStart(2, '0')}:${s.toString().padStart(2, '0')}`;
+ } else {
+ return `${m}:${s.toString().padStart(2, '0')}`;
+ }
+}
+
+function timeFromLocationX(
+ locationX: number,
+ trackWidth: number,
+ durationSeconds: number
+): number {
+ if (trackWidth <= 0 || durationSeconds <= 0) {
+ return 0;
+ }
+ const pct = Math.max(0, Math.min(1, locationX / trackWidth));
+ return pct * durationSeconds;
+}
+
+const TRACK_BAR_HEIGHT = 12;
+const TRACK_BAR_HEIGHT_PRESSED = 18;
+const TRACK_BAR_ANIM_MS = 150;
+const NATIVE_DELAY_MS = 10; // delay between native calls so audio engine can catch up
+const SCRUB_PAN_MIN_DISTANCE = 8;
+
+function useExpandableTrackHeight() {
+ const height = useSharedValue(TRACK_BAR_HEIGHT);
+ const animatedStyle = useAnimatedStyle(() => ({
+ height: height.value,
+ borderRadius: height.value / 2,
+ }));
+
+ const expand = () => {
+ height.value = withTiming(TRACK_BAR_HEIGHT_PRESSED, {
+ duration: TRACK_BAR_ANIM_MS,
+ });
+ };
+
+ const collapse = () => {
+ height.value = withTiming(TRACK_BAR_HEIGHT, {
+ duration: TRACK_BAR_ANIM_MS,
+ });
+ };
+
+ return { animatedStyle, expand, collapse };
+}
+
+const AudioControls: React.FC = () => {
+ const {
+ isReady,
+ play,
+ pause,
+ seekToTime,
+ playbackState,
+ muted,
+ setMuted,
+ currentTime,
+ duration,
+ } = useAudioTagContext();
+
+ const [progressWidth, setProgressWidth] = useState(0);
+ const [scrubTime, setScrubTime] = useState(null);
+
+ const progressTrackAnim = useExpandableTrackHeight();
+
+ const progressTrackRef = useRef(null);
+ const progressMetricsRef = useRef({ left: 0, width: 0 });
+ const progressWidthRef = useRef(0);
+ const durationRef = useRef(duration);
+ const wasPlayingBeforeScrubRef = useRef(false);
+ durationRef.current = duration;
+ progressWidthRef.current = progressWidth;
+
+ const onStart = useCallback(
+ (x: number) => {
+ progressTrackAnim.expand();
+ const d = durationRef.current;
+ if (playbackState === 'playing') {
+ wasPlayingBeforeScrubRef.current = true;
+ pause();
+ } else {
+ wasPlayingBeforeScrubRef.current = false;
+ }
+ progressTrackRef.current?.measureInWindow((left, _y, width, _h) => {
+ progressMetricsRef.current.left = left;
+ progressMetricsRef.current.width = width;
+ setScrubTime(
+ timeFromLocationX(x, width || progressWidthRef.current, d)
+ );
+ });
+ },
+ [playbackState, pause, progressTrackAnim]
+ );
+
+ const onUpdate = useCallback((x: number) => {
+ const d = durationRef.current;
+ const w = progressMetricsRef.current.width || progressWidthRef.current;
+ setScrubTime(timeFromLocationX(x, w, d));
+ }, []);
+
+ const seekTo = useCallback(
+ (x: number) => {
+ const d = durationRef.current;
+ const w = progressMetricsRef.current.width || progressWidthRef.current;
+ const t = timeFromLocationX(x, w, d);
+ seekToTime(t);
+ },
+ [seekToTime]
+ );
+
+ const onEnd = useCallback(
+ (x: number, flag: boolean = true) => {
+ if (flag) {
+ seekTo(x);
+ }
+ progressTrackAnim.collapse();
+ setScrubTime(null);
+ if (wasPlayingBeforeScrubRef.current) {
+ setTimeout(() => {
+ play();
+ }, NATIVE_DELAY_MS);
+ }
+ },
+ [play, progressTrackAnim, seekTo]
+ );
+
+ const onCancel = useCallback(() => {
+ progressTrackAnim.collapse();
+ setScrubTime(null);
+ }, [progressTrackAnim]);
+
+ const onTapSeek = useCallback(
+ (x: number) => {
+ if (playbackState === 'playing') {
+ wasPlayingBeforeScrubRef.current = true;
+ pause();
+ } else {
+ wasPlayingBeforeScrubRef.current = false;
+ }
+ setTimeout(() => {
+ seekTo(x);
+ setTimeout(() => {
+ onEnd(x, false);
+ }, NATIVE_DELAY_MS);
+ }, NATIVE_DELAY_MS);
+ },
+ [seekTo, onEnd, playbackState, pause]
+ );
+
+ const scrubGesture = useMemo(() => {
+ const panGesture = Gesture.Pan()
+ .minDistance(SCRUB_PAN_MIN_DISTANCE)
+ .onStart((e) => {
+ scheduleOnRN(onStart, e.x);
+ })
+ .onUpdate((e) => {
+ scheduleOnRN(onUpdate, e.x);
+ })
+ .onEnd((e) => {
+ scheduleOnRN(onEnd, e.x);
+ })
+ .onFinalize((_e, success) => {
+ if (!success) {
+ scheduleOnRN(onCancel);
+ }
+ });
+
+ const tapGesture = Gesture.Tap()
+ .maxDistance(14)
+ .onEnd((e, success) => {
+ if (success) {
+ scheduleOnRN(onTapSeek, e.x);
+ }
+ });
+
+ return Gesture.Race(panGesture, tapGesture);
+ }, [onStart, onUpdate, onEnd, onCancel, onTapSeek]);
+
+ const onPlayPausePress = () => {
+ if (playbackState === 'playing') {
+ pause();
+ } else {
+ play();
+ }
+ };
+
+ const onProgressTrackLayout = (e: LayoutChangeEvent) => {
+ const w = e.nativeEvent.layout.width;
+ setProgressWidth(w);
+ progressWidthRef.current = w;
+ progressTrackRef.current?.measureInWindow((left, _y, width, _h) => {
+ progressMetricsRef.current.left = left;
+ progressMetricsRef.current.width = width;
+ });
+ };
+
+ if (!isReady) {
+ return (
+
+
+
+ Loading…
+
+
+ );
+ }
+
+ const displayTime = scrubTime ?? currentTime;
+ const progress = duration > 0 ? displayTime / duration : 0;
+
+ return (
+
+
+
+ {playbackState === 'playing' ? (
+
+ ) : (
+
+ )}
+
+
+
+ {formatTime(displayTime)} / {formatTime(duration)}
+
+
+
+ {/* prettier-ignore */}
+
+
+
+
+
+
+
+ setMuted(!muted)}>
+ {muted ? (
+
+ ) : (
+
+ )}
+
+
+
+ );
+};
+
+const styles = StyleSheet.create({
+ container: {
+ flexDirection: 'column',
+ alignSelf: 'stretch',
+ minWidth: 200,
+ paddingVertical: 10,
+ paddingHorizontal: 12,
+ backgroundColor: '#f5f5f5',
+ borderRadius: 8,
+ borderWidth: 1,
+ borderColor: '#333',
+ ...Platform.select({
+ ios: {
+ shadowColor: '#000',
+ shadowOffset: { width: 0, height: 2 },
+ shadowOpacity: 0.15,
+ shadowRadius: 4,
+ },
+ android: {
+ elevation: 4,
+ },
+ }),
+ },
+ topRow: {
+ flexDirection: 'row',
+ alignItems: 'center',
+ },
+ playPause: {
+ padding: 4,
+ marginRight: 12,
+ },
+ timeText: {
+ color: '#000',
+ fontSize: 12,
+ marginRight: 10,
+ minWidth: 48,
+ },
+ progressTrack: {
+ flex: 1,
+ minWidth: 40,
+ justifyContent: 'center',
+ marginRight: 10,
+ },
+ trackInner: {
+ alignSelf: 'stretch',
+ backgroundColor: '#ccc',
+ overflow: 'hidden',
+ },
+ trackFill: {
+ height: '100%',
+ backgroundColor: '#000',
+ },
+ volumeIcon: {
+ padding: 4,
+ marginRight: 12,
+ },
+ loadingRow: {
+ flexDirection: 'row',
+ alignItems: 'center',
+ },
+ loadingText: {
+ color: '#333',
+ fontSize: 14,
+ },
+});
+
+export default AudioControls;
diff --git a/packages/react-native-audio-api/src/development/react/Audio/AudioFileSourceNode.ts b/packages/react-native-audio-api/src/development/react/Audio/AudioFileSourceNode.ts
new file mode 100644
index 000000000..d7670695f
--- /dev/null
+++ b/packages/react-native-audio-api/src/development/react/Audio/AudioFileSourceNode.ts
@@ -0,0 +1,125 @@
+import { AudioEventEmitter, AudioEventSubscription } from '../../../events';
+import type { EventEmptyType } from '../../../events/types';
+import type BaseAudioContext from '../../../core/BaseAudioContext';
+import type { IAudioFileSourceNode } from '../../../interfaces';
+
+type AttachFileSourceOptions = {
+ loop: boolean;
+ onEnded: () => void;
+};
+
+export class AudioFileSourceNode {
+ private readonly emitter = new AudioEventEmitter(global.AudioEventEmitter);
+
+ private node: IAudioFileSourceNode | null = null;
+ private didConnectToDestination = false;
+ private positionSubscription?: AudioEventSubscription;
+ private endedSubscription?: AudioEventSubscription;
+
+ attach(
+ fileSource: IAudioFileSourceNode,
+ options: AttachFileSourceOptions
+ ): { duration: number } {
+ this.resetNodeAndSubscriptions();
+ this.node = fileSource;
+ this.node.loop = options.loop;
+
+ this.endedSubscription = this.emitter.addAudioEventListener(
+ 'ended',
+ (_event: EventEmptyType) => {
+ options.onEnded();
+ }
+ );
+ this.node.onEnded = this.endedSubscription.subscriptionId;
+
+ return {
+ duration: this.node.duration,
+ };
+ }
+
+ dispose(): void {
+ this.resetNodeAndSubscriptions();
+ }
+
+ /**
+ * First call: connect to destination + start. Later calls on the same node
+ * (e.g. resume after pause): only start — avoids duplicate edges and matches
+ * native file-source resume (unpause) semantics.
+ */
+ play(baseContext: BaseAudioContext): void {
+ if (!this.node) {
+ return;
+ }
+ if (!this.didConnectToDestination) {
+ // @ts-expect-error destination.node is the underlying graph node
+ this.node.connect(baseContext.destination.node);
+ this.didConnectToDestination = true;
+ }
+ this.node.start();
+ }
+
+ pause(): void {
+ this.node?.pause();
+ }
+
+ seekToTime(seconds: number): void {
+ this.node?.seekToTime(seconds);
+ }
+
+ setVolume(value: number): void {
+ if (this.node) {
+ this.node.volume = value;
+ }
+ }
+
+ setLoop(value: boolean): void {
+ if (this.node) {
+ this.node.loop = value;
+ }
+ }
+
+ getDuration(): number {
+ return this.node?.duration ?? 0;
+ }
+
+ getCurrentTime(): number {
+ return this.node?.currentTime ?? 0;
+ }
+
+ startPositionTracking(onTime: (seconds: number) => void): void {
+ if (!this.node) {
+ return;
+ }
+ this.stopPositionTracking();
+ this.positionSubscription = this.emitter.addAudioEventListener(
+ 'positionChanged',
+ (event) => {
+ onTime(event.value);
+ }
+ );
+ this.node.onPositionChanged = this.positionSubscription.subscriptionId;
+ }
+
+ stopPositionTracking(): void {
+ this.positionSubscription?.remove();
+ this.positionSubscription = undefined;
+ if (this.node) {
+ this.node.onPositionChanged = '0';
+ }
+ }
+
+ private resetNodeAndSubscriptions(): void {
+ this.positionSubscription?.remove();
+ this.positionSubscription = undefined;
+ this.endedSubscription?.remove();
+ this.endedSubscription = undefined;
+
+ if (this.node) {
+ this.node.onPositionChanged = '0';
+ this.node.onEnded = '0';
+ this.node.disconnect(undefined);
+ }
+ this.node = null;
+ this.didConnectToDestination = false;
+ }
+}
diff --git a/packages/react-native-audio-api/src/development/react/Audio/AudioTagContext.ts b/packages/react-native-audio-api/src/development/react/Audio/AudioTagContext.ts
new file mode 100644
index 000000000..0ee9338f1
--- /dev/null
+++ b/packages/react-native-audio-api/src/development/react/Audio/AudioTagContext.ts
@@ -0,0 +1,30 @@
+import { createContext, useContext } from 'react';
+import type { AudioTagPlaybackState } from './types';
+
+export type AudioComponentContextType = {
+ play: () => void;
+ pause: () => void;
+ seekToTime: (seconds: number) => void;
+ volume: number;
+ setVolume: (volume: number) => void;
+ muted: boolean;
+ setMuted: (muted: boolean) => void;
+ isReady: boolean;
+ playbackState: AudioTagPlaybackState;
+ currentTime: number;
+ duration: number;
+};
+
+export const AudioComponentContext = createContext<
+ AudioComponentContextType | undefined
+>(undefined);
+
+export function useAudioTagContext(): AudioComponentContextType {
+ const context = useContext(AudioComponentContext);
+
+ if (context === undefined) {
+ throw new Error('useAudioTagContext must be used within an