Commit c76c33fb authored by Juan Linietsky's avatar Juan Linietsky Committed by Hein-Pieter van Braam-Stewart

Added generator audio stream, and spectrum analyzer audio effect

Made AudioFrame and Vector2 equivalent for casting.
Added ability to obtain the playback object from stream players.
Added ability to obtain effect instance from audio server.

(cherry picked from commit e33764744cb2bf72ee77c823c3beeb6dc870d2dc)
parent 5e02d6d9
......@@ -31,6 +31,7 @@
#ifndef AUDIOFRAME_H
#define AUDIOFRAME_H
#include "core/math/vector2.h"
#include "core/typedefs.h"
static inline float undenormalise(volatile float f) {
......@@ -128,6 +129,14 @@ struct AudioFrame {
return *this;
}
// Implicit conversion to Vector2 (x = left channel, y = right channel);
// this is what makes AudioFrame and Vector2 equivalent for casting.
_ALWAYS_INLINE_ operator Vector2() const {
return Vector2(l, r);
}
// Construct from a Vector2 (x = left channel, y = right channel).
_ALWAYS_INLINE_ AudioFrame(const Vector2 &p_v2) {
l = p_v2.x;
r = p_v2.y;
}
// Default constructor intentionally leaves l/r uninitialized (hot audio path).
_ALWAYS_INLINE_ AudioFrame() {}
};
......
......@@ -473,6 +473,10 @@ bool AudioStreamPlayer2D::get_stream_paused() const {
return stream_paused;
}
// Returns the currently held playback object, so scripts can interact with it
// directly (e.g. AudioStreamGeneratorPlayback::push_frame). May be null.
Ref<AudioStreamPlayback> AudioStreamPlayer2D::get_stream_playback() {
return stream_playback;
}
void AudioStreamPlayer2D::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_stream", "stream"), &AudioStreamPlayer2D::set_stream);
......@@ -512,6 +516,8 @@ void AudioStreamPlayer2D::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_stream_paused", "pause"), &AudioStreamPlayer2D::set_stream_paused);
ClassDB::bind_method(D_METHOD("get_stream_paused"), &AudioStreamPlayer2D::get_stream_paused);
ClassDB::bind_method(D_METHOD("get_stream_playback"), &AudioStreamPlayer2D::get_stream_playback);
ClassDB::bind_method(D_METHOD("_bus_layout_changed"), &AudioStreamPlayer2D::_bus_layout_changed);
ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "stream", PROPERTY_HINT_RESOURCE_TYPE, "AudioStream"), "set_stream", "get_stream");
......
......@@ -130,6 +130,8 @@ public:
void set_stream_paused(bool p_pause);
bool get_stream_paused() const;
Ref<AudioStreamPlayback> get_stream_playback();
AudioStreamPlayer2D();
~AudioStreamPlayer2D();
};
......
......@@ -887,6 +887,10 @@ bool AudioStreamPlayer3D::get_stream_paused() const {
return stream_paused;
}
// Returns the currently held playback object, so scripts can interact with it
// directly (e.g. AudioStreamGeneratorPlayback::push_frame). May be null.
Ref<AudioStreamPlayback> AudioStreamPlayer3D::get_stream_playback() {
return stream_playback;
}
void AudioStreamPlayer3D::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_stream", "stream"), &AudioStreamPlayer3D::set_stream);
......@@ -953,6 +957,8 @@ void AudioStreamPlayer3D::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_stream_paused", "pause"), &AudioStreamPlayer3D::set_stream_paused);
ClassDB::bind_method(D_METHOD("get_stream_paused"), &AudioStreamPlayer3D::get_stream_paused);
ClassDB::bind_method(D_METHOD("get_stream_playback"), &AudioStreamPlayer3D::get_stream_playback);
ClassDB::bind_method(D_METHOD("_bus_layout_changed"), &AudioStreamPlayer3D::_bus_layout_changed);
ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "stream", PROPERTY_HINT_RESOURCE_TYPE, "AudioStream"), "set_stream", "get_stream");
......
......@@ -206,6 +206,8 @@ public:
void set_stream_paused(bool p_pause);
bool get_stream_paused() const;
Ref<AudioStreamPlayback> get_stream_playback();
AudioStreamPlayer3D();
~AudioStreamPlayer3D();
};
......
......@@ -331,6 +331,10 @@ void AudioStreamPlayer::_bus_layout_changed() {
_change_notify();
}
// Returns the currently held playback object, so scripts can interact with it
// directly (e.g. AudioStreamGeneratorPlayback::push_frame). May be null.
Ref<AudioStreamPlayback> AudioStreamPlayer::get_stream_playback() {
return stream_playback;
}
void AudioStreamPlayer::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_stream", "stream"), &AudioStreamPlayer::set_stream);
......@@ -366,6 +370,8 @@ void AudioStreamPlayer::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_stream_paused", "pause"), &AudioStreamPlayer::set_stream_paused);
ClassDB::bind_method(D_METHOD("get_stream_paused"), &AudioStreamPlayer::get_stream_paused);
ClassDB::bind_method(D_METHOD("get_stream_playback"), &AudioStreamPlayer::get_stream_playback);
ADD_PROPERTY(PropertyInfo(Variant::OBJECT, "stream", PROPERTY_HINT_RESOURCE_TYPE, "AudioStream"), "set_stream", "get_stream");
ADD_PROPERTY(PropertyInfo(Variant::REAL, "volume_db", PROPERTY_HINT_RANGE, "-80,24"), "set_volume_db", "get_volume_db");
ADD_PROPERTY(PropertyInfo(Variant::REAL, "pitch_scale", PROPERTY_HINT_RANGE, "0.01,32,0.01"), "set_pitch_scale", "get_pitch_scale");
......
......@@ -106,6 +106,9 @@ public:
void set_stream_paused(bool p_pause);
bool get_stream_paused() const;
Ref<AudioStreamPlayback> get_stream_playback();
AudioStreamPlayer();
~AudioStreamPlayer();
};
......
#include "audio_effect_spectrum_analyzer.h"
#include "servers/audio_server.h"
// In-place radix-2 complex FFT by S.M. Bernsee (see block comment below).
// sign = -1 computes the forward transform, +1 the inverse.
static void smbFft(float *fftBuffer, long fftFrameSize, long sign)
/*
FFT routine, (C)1996 S.M.Bernsee. Sign = -1 is FFT, 1 is iFFT (inverse)
Fills fftBuffer[0...2*fftFrameSize-1] with the Fourier transform of the
time domain data in fftBuffer[0...2*fftFrameSize-1]. The FFT array takes
and returns the cosine and sine parts in an interleaved manner, ie.
fftBuffer[0] = cosPart[0], fftBuffer[1] = sinPart[0], asf. fftFrameSize
must be a power of 2. It expects a complex input signal (see footnote 2),
ie. when working with 'common' audio signals our input signal has to be
passed as {in[0],0.,in[1],0.,in[2],0.,...} asf. In that case, the transform
of the frequencies of interest is in fftBuffer[0...fftFrameSize].
*/
{
float wr, wi, arg, *p1, *p2, temp;
float tr, ti, ur, ui, *p1r, *p1i, *p2r, *p2i;
long i, bitm, j, le, le2, k;
// Bit-reversal permutation of the interleaved complex input.
for (i = 2; i < 2 * fftFrameSize - 2; i += 2) {
for (bitm = 2, j = 0; bitm < 2 * fftFrameSize; bitm <<= 1) {
if (i & bitm) j++;
j <<= 1;
}
if (i < j) {
// Swap complex elements i and j (real part, then imaginary part).
p1 = fftBuffer + i;
p2 = fftBuffer + j;
temp = *p1;
*(p1++) = *p2;
*(p2++) = temp;
temp = *p1;
*p1 = *p2;
*p2 = temp;
}
}
// log2(fftFrameSize) butterfly passes (Danielson-Lanczos recombination).
for (k = 0, le = 2; k < (long)(log((double)fftFrameSize) / log(2.) + .5); k++) {
le <<= 1;
le2 = le >> 1;
ur = 1.0;
ui = 0.0;
// Twiddle-factor rotation angle for this pass.
arg = Math_PI / (le2 >> 1);
wr = cos(arg);
wi = sign * sin(arg);
for (j = 0; j < le2; j += 2) {
p1r = fftBuffer + j;
p1i = p1r + 1;
p2r = p1r + le2;
p2i = p2r + 1;
for (i = j; i < 2 * fftFrameSize; i += le) {
// Butterfly: (p1, p2) -> (p1 + w*p2, p1 - w*p2).
tr = *p2r * ur - *p2i * ui;
ti = *p2r * ui + *p2i * ur;
*p2r = *p1r - tr;
*p2i = *p1i - ti;
*p1r += tr;
*p1i += ti;
p1r += le;
p1i += le;
p2r += le;
p2i += le;
}
// Advance twiddle factor: (ur, ui) *= (wr, wi).
tr = ur * wr - ui * wi;
ui = ur * wi + ui * wr;
ur = tr;
}
}
}
// Effect callback: passes audio through unmodified while accumulating frames
// into a temporal buffer; every fft_size * 2 frames a stereo FFT is computed
// and its per-bin magnitudes are stored in the fft_history ring.
void AudioEffectSpectrumAnalyzerInstance::process(const AudioFrame *p_src_frames, AudioFrame *p_dst_frames, int p_frame_count) {
uint64_t time = OS::get_singleton()->get_ticks_usec();
//copy everything over first, since this only really does capture
for (int i = 0; i < p_frame_count; i++) {
p_dst_frames[i] = p_src_frames[i];
}
//capture spectrum
while (p_frame_count) {
// Fill the temporal buffer up to its capacity of fft_size * 2 frames.
int to_fill = fft_size * 2 - temporal_fft_pos;
to_fill = MIN(to_fill, p_frame_count);
float *fftw = temporal_fft.ptrw();
for (int i = 0; i < to_fill; i++) { //left and right buffers
// Interleaved complex layout (real = sample, imaginary = 0):
// left channel in the first half, right channel in the second half.
fftw[(i + temporal_fft_pos) * 2] = p_src_frames[i].l;
fftw[(i + temporal_fft_pos) * 2 + 1] = 0;
fftw[(i + temporal_fft_pos + fft_size * 2) * 2] = p_src_frames[i].r;
fftw[(i + temporal_fft_pos + fft_size * 2) * 2 + 1] = 0;
}
p_src_frames += to_fill;
temporal_fft_pos += to_fill;
p_frame_count -= to_fill;
if (temporal_fft_pos == fft_size * 2) {
//time to do a FFT
smbFft(fftw, fft_size * 2, -1);
smbFft(fftw + fft_size * 4, fft_size * 2, -1);
int next = (fft_pos + 1) % fft_count;
AudioFrame *hw = (AudioFrame *)fft_history[next].ptr(); //do not use write, avoid cow
for (int i = 0; i < fft_size; i++) {
//abs(vec)/fft_size normalizes each frequency
float window = 1.0; //-.5 * Math::cos(2. * Math_PI * (double)i / (double)fft_size) + .5;
hw[i].l = window * Vector2(fftw[i * 2], fftw[i * 2 + 1]).length() / float(fft_size);
hw[i].r = window * Vector2(fftw[fft_size * 4 + i * 2], fftw[fft_size * 4 + i * 2 + 1]).length() / float(fft_size);
}
fft_pos = next; //swap
temporal_fft_pos = 0;
}
}
//determine time of capture
double remainer_sec = (temporal_fft_pos / mix_rate); //subtract remainder from mix time
last_fft_time = time - uint64_t(remainer_sec * 1000000.0);
}
// Exposes the magnitude query and the MagnitudeMode constants to scripting.
void AudioEffectSpectrumAnalyzerInstance::_bind_methods() {
ClassDB::bind_method(D_METHOD("get_magnitude_for_frequency_range", "from_hz", "to_hz", "mode"), &AudioEffectSpectrumAnalyzerInstance::get_magnitude_for_frequency_range, DEFVAL(MAGNITUDE_MAX));
BIND_ENUM_CONSTANT(MAGNITUDE_AVERAGE);
BIND_ENUM_CONSTANT(MAGNITUDE_MAX);
}
// Returns the per-channel magnitude (x = left, y = right) over the frequency
// range [p_begin, p_end] Hz, read from the FFT snapshot that best matches
// "now" minus output latency and the configured tap-back position.
// Returns Vector2() if no FFT has been captured yet.
Vector2 AudioEffectSpectrumAnalyzerInstance::get_magnitude_for_frequency_range(float p_begin, float p_end, MagnitudeMode p_mode) const {
	if (last_fft_time == 0) {
		return Vector2();
	}
	uint64_t time = OS::get_singleton()->get_ticks_usec();
	// Seconds between the audible signal and the most recent capture.
	float diff = double(time - last_fft_time) / 1000000.0 + base->get_tap_back_pos();
	diff -= AudioServer::get_singleton()->get_output_delay();
	float fft_time_size = float(fft_size) / mix_rate;
	// Walk backwards through the history ring until the target time is reached.
	int fft_index = fft_pos;
	while (diff > fft_time_size) {
		diff -= fft_time_size;
		fft_index -= 1;
		if (fft_index < 0) {
			fft_index = fft_count - 1;
		}
	}
	// Map the Hz range onto FFT bin indices (Nyquist frequency = mix_rate / 2).
	int begin_pos = p_begin * fft_size / (mix_rate * 0.5);
	int end_pos = p_end * fft_size / (mix_rate * 0.5);
	begin_pos = CLAMP(begin_pos, 0, fft_size - 1);
	end_pos = CLAMP(end_pos, 0, fft_size - 1);
	if (begin_pos > end_pos) {
		SWAP(begin_pos, end_pos);
	}
	const AudioFrame *r = fft_history[fft_index].ptr();
	if (p_mode == MAGNITUDE_AVERAGE) {
		Vector2 avg;
		for (int i = begin_pos; i <= end_pos; i++) {
			avg += Vector2(r[i]);
		}
		avg /= float(end_pos - begin_pos + 1);
		return avg;
	} else {
		Vector2 max;
		for (int i = begin_pos; i <= end_pos; i++) {
			max.x = MAX(max.x, r[i].l);
			// Bug fix: was MAX(max.x, r[i].r) — the right-channel maximum was
			// being compared against the LEFT channel's running maximum.
			max.y = MAX(max.y, r[i].r);
		}
		return max;
	}
}
// Creates a runtime instance: resolves the FFT_Size enum to a bin count,
// sizes the history ring to cover buffer_length seconds, and zeroes buffers.
Ref<AudioEffectInstance> AudioEffectSpectrumAnalyzer::instance() {
Ref<AudioEffectSpectrumAnalyzerInstance> ins;
ins.instance();
ins->base = Ref<AudioEffectSpectrumAnalyzer>(this);
static const int fft_sizes[FFT_SIZE_MAX] = { 256, 512, 1024, 2048, 4096 };
ins->fft_size = fft_sizes[fft_size];
ins->mix_rate = AudioServer::get_singleton()->get_mix_rate();
// Number of FFT snapshots needed to span buffer_length seconds (+1 spare).
ins->fft_count = (buffer_length / (float(ins->fft_size) / ins->mix_rate)) + 1;
ins->fft_pos = 0;
ins->last_fft_time = 0;
ins->fft_history.resize(ins->fft_count);
ins->temporal_fft.resize(ins->fft_size * 8); //x2 stereo, x2 amount of samples for freqs, x2 for input
ins->temporal_fft_pos = 0;
for (int i = 0; i < ins->fft_count; i++) {
ins->fft_history.write[i].resize(ins->fft_size); //only magnitude matters
for (int j = 0; j < ins->fft_size; j++) {
ins->fft_history.write[i].write[j] = AudioFrame(0, 0);
}
}
return ins;
}
// Sets how many seconds of FFT history instances keep around.
// Parameter renamed from "p_volume" (copy-paste leftover from another setter)
// to match the header declaration set_buffer_length(float p_seconds).
void AudioEffectSpectrumAnalyzer::set_buffer_length(float p_seconds) {
	buffer_length = p_seconds;
}
// Seconds of FFT history kept by instances.
float AudioEffectSpectrumAnalyzer::get_buffer_length() const {
return buffer_length;
}
// Sets how many seconds behind "now" magnitude queries should sample.
void AudioEffectSpectrumAnalyzer::set_tap_back_pos(float p_seconds) {
tapback_pos = p_seconds;
}
// Seconds behind "now" that magnitude queries sample.
float AudioEffectSpectrumAnalyzer::get_tap_back_pos() const {
return tapback_pos;
}
// Sets the FFT resolution; takes effect on newly created instances only.
void AudioEffectSpectrumAnalyzer::set_fft_size(FFT_Size p_fft_size) {
ERR_FAIL_INDEX(p_fft_size, FFT_SIZE_MAX);
fft_size = p_fft_size;
}
// Currently configured FFT resolution (enum, not the bin count).
AudioEffectSpectrumAnalyzer::FFT_Size AudioEffectSpectrumAnalyzer::get_fft_size() const {
return fft_size;
}
// Exposes configuration accessors and their properties to scripting.
void AudioEffectSpectrumAnalyzer::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_buffer_length", "seconds"), &AudioEffectSpectrumAnalyzer::set_buffer_length);
ClassDB::bind_method(D_METHOD("get_buffer_length"), &AudioEffectSpectrumAnalyzer::get_buffer_length);
ClassDB::bind_method(D_METHOD("set_tap_back_pos", "seconds"), &AudioEffectSpectrumAnalyzer::set_tap_back_pos);
ClassDB::bind_method(D_METHOD("get_tap_back_pos"), &AudioEffectSpectrumAnalyzer::get_tap_back_pos);
ClassDB::bind_method(D_METHOD("set_fft_size", "size"), &AudioEffectSpectrumAnalyzer::set_fft_size);
ClassDB::bind_method(D_METHOD("get_fft_size"), &AudioEffectSpectrumAnalyzer::get_fft_size);
ADD_PROPERTY(PropertyInfo(Variant::REAL, "buffer_length", PROPERTY_HINT_RANGE, "0.1,4,0.1"), "set_buffer_length", "get_buffer_length");
// NOTE(review): this range hint (min 0.1) excludes the constructor default of
// 0.01 for tap_back_pos — confirm whether the hint should be "0.01,4,0.01".
ADD_PROPERTY(PropertyInfo(Variant::REAL, "tap_back_pos", PROPERTY_HINT_RANGE, "0.1,4,0.1"), "set_tap_back_pos", "get_tap_back_pos");
ADD_PROPERTY(PropertyInfo(Variant::INT, "fft_size", PROPERTY_HINT_ENUM, "256,512,1024,2048,4096"), "set_fft_size", "get_fft_size");
}
// Defaults: 2 s of history, 10 ms tap-back, 1024-bin FFT.
AudioEffectSpectrumAnalyzer::AudioEffectSpectrumAnalyzer() {
buffer_length = 2;
tapback_pos = 0.01;
fft_size = FFT_SIZE_1024;
}
#ifndef AUDIO_EFFECT_SPECTRUM_ANALYZER_H
#define AUDIO_EFFECT_SPECTRUM_ANALYZER_H
#include "servers/audio/audio_effect.h"
class AudioEffectSpectrumAnalyzer;
// Per-bus-channel runtime state of the spectrum analyzer: captures FFT
// snapshots during mixing and answers magnitude queries over them.
class AudioEffectSpectrumAnalyzerInstance : public AudioEffectInstance {
GDCLASS(AudioEffectSpectrumAnalyzerInstance, AudioEffectInstance)
public:
// How magnitudes across a frequency range are combined.
enum MagnitudeMode {
MAGNITUDE_AVERAGE,
MAGNITUDE_MAX,
};
private:
friend class AudioEffectSpectrumAnalyzer;
Ref<AudioEffectSpectrumAnalyzer> base; // owning effect; configuration source
Vector<Vector<AudioFrame> > fft_history; // ring of magnitude snapshots (l/r per bin)
Vector<float> temporal_fft; // interleaved complex buffer being accumulated
int temporal_fft_pos; // frames accumulated so far in temporal_fft
int fft_size; // bins per snapshot (resolved from the FFT_Size enum)
int fft_count; // number of snapshots in fft_history
int fft_pos; // index of the most recent snapshot
float mix_rate; // mix rate captured at instantiation
uint64_t last_fft_time; // usec timestamp of last FFT; 0 = none captured yet
protected:
static void _bind_methods();
public:
virtual void process(const AudioFrame *p_src_frames, AudioFrame *p_dst_frames, int p_frame_count);
Vector2 get_magnitude_for_frequency_range(float p_begin, float p_end, MagnitudeMode p_mode = MAGNITUDE_MAX) const;
};
VARIANT_ENUM_CAST(AudioEffectSpectrumAnalyzerInstance::MagnitudeMode)
// Spectrum analyzer effect resource: holds configuration only; per-channel
// instances (created via instance()) do the actual capture.
class AudioEffectSpectrumAnalyzer : public AudioEffect {
GDCLASS(AudioEffectSpectrumAnalyzer, AudioEffect)
public:
enum FFT_Size {
FFT_SIZE_256,
FFT_SIZE_512,
FFT_SIZE_1024,
FFT_SIZE_2048,
FFT_SIZE_4096,
FFT_SIZE_MAX // sentinel, not a valid size
};
public:
// NOTE(review): these data members are public; since accessors exist and the
// instance class is already a friend, they could likely be private — confirm
// no external code pokes them directly before tightening.
friend class AudioEffectSpectrumAnalyzerInstance;
float buffer_length; // seconds of FFT history instances keep
float tapback_pos; // seconds behind "now" that queries sample
FFT_Size fft_size;
protected:
static void _bind_methods();
public:
Ref<AudioEffectInstance> instance();
void set_buffer_length(float p_seconds);
float get_buffer_length() const;
void set_tap_back_pos(float p_seconds);
float get_tap_back_pos() const;
void set_fft_size(FFT_Size);
FFT_Size get_fft_size() const;
AudioEffectSpectrumAnalyzer();
};
VARIANT_ENUM_CAST(AudioEffectSpectrumAnalyzer::FFT_Size);
#endif // AUDIO_EFFECT_SPECTRUM_ANALYZER_H
#include "audio_stream_generator.h"
// Sets the sample rate (Hz) the user-fed data is assumed to be in.
void AudioStreamGenerator::set_mix_rate(float p_mix_rate) {
mix_rate = p_mix_rate;
}
// Sample rate (Hz) the user-fed data is assumed to be in.
float AudioStreamGenerator::get_mix_rate() const {
return mix_rate;
}
// Sets the playback ring buffer length in seconds (applies to new playbacks).
void AudioStreamGenerator::set_buffer_length(float p_seconds) {
buffer_len = p_seconds;
}
// Playback ring buffer length in seconds.
float AudioStreamGenerator::get_buffer_length() const {
return buffer_len;
}
// Creates a playback whose ring buffer covers ~buffer_len seconds at mix_rate.
Ref<AudioStreamPlayback> AudioStreamGenerator::instance_playback() {
Ref<AudioStreamGeneratorPlayback> playback;
playback.instance();
playback->generator = this;
int target_buffer_size = mix_rate * buffer_len;
// Presumably RingBuffer::resize() takes a power-of-two exponent, hence
// nearest_shift() rather than the raw size — verify against core/ring_buffer.h.
playback->buffer.resize(nearest_shift(target_buffer_size));
playback->buffer.clear();
return playback;
}
// Display name for this stream type.
String AudioStreamGenerator::get_stream_name() const {
return "UserFeed";
}
// Generated streams have no fixed length; report 0.
float AudioStreamGenerator::get_length() const {
return 0;
}
// Exposes mix rate and buffer length configuration to scripting.
void AudioStreamGenerator::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_mix_rate", "hz"), &AudioStreamGenerator::set_mix_rate);
ClassDB::bind_method(D_METHOD("get_mix_rate"), &AudioStreamGenerator::get_mix_rate);
ClassDB::bind_method(D_METHOD("set_buffer_length", "seconds"), &AudioStreamGenerator::set_buffer_length);
ClassDB::bind_method(D_METHOD("get_buffer_length"), &AudioStreamGenerator::get_buffer_length);
ADD_PROPERTY(PropertyInfo(Variant::REAL, "mix_rate", PROPERTY_HINT_RANGE, "20,192000,1"), "set_mix_rate", "get_mix_rate");
ADD_PROPERTY(PropertyInfo(Variant::REAL, "buffer_length", PROPERTY_HINT_RANGE, "0.01,10,0.01"), "set_buffer_length", "get_buffer_length");
}
// Defaults: 44.1 kHz, half a second of buffering.
AudioStreamGenerator::AudioStreamGenerator() {
mix_rate = 44100;
buffer_len = 0.5;
}
////////////////
bool AudioStreamGeneratorPlayback::push_frame(const Vector2 &p_frame) {
if (buffer.space_left() < 1) {
return false;
}
AudioFrame f = p_frame;
buffer.write(&f, 1);
return true;
}
// True when the ring buffer still has room for p_frames more frames.
bool AudioStreamGeneratorPlayback::can_push_buffer(int p_frames) const {
	return p_frames <= buffer.space_left();
}
// Pushes a whole array of frames at once. All-or-nothing: returns false
// without writing anything if there is not enough space for every frame.
bool AudioStreamGeneratorPlayback::push_buffer(const PoolVector2Array &p_frames) {
int to_write = p_frames.size();
if (buffer.space_left() < to_write) {
return false;
}
PoolVector2Array::Read r = p_frames.read();
if (sizeof(real_t) == 4) {
//write directly
// Single-precision build: Vector2 (2 x 32-bit float) matches AudioFrame's
// layout, so the array can be written without conversion.
buffer.write((const AudioFrame *)r.ptr(), to_write);
} else {
//convert from double
// Double-precision build: convert through a fixed-size stack chunk.
AudioFrame buf[2048];
int ofs = 0;
while (to_write) {
int w = MIN(to_write, 2048);
for (int i = 0; i < w; i++) {
buf[i] = r[i + ofs];
}
buffer.write(buf, w);
ofs += w;
to_write -= w;
}
}
return true;
}
// Number of frames that can currently be pushed without overflowing.
int AudioStreamGeneratorPlayback::get_frames_available() const {
return buffer.space_left();
}
// Number of buffer underruns (mixer wanted frames the user had not pushed).
int AudioStreamGeneratorPlayback::get_skips() const {
return skips;
}
// Empties the ring buffer and resets the playback clock; it is an error to
// call this while playback is active.
void AudioStreamGeneratorPlayback::clear_buffer() {
ERR_FAIL_COND(active);
buffer.clear();
mixed = 0;
}
// Resampled-playback callback: drains up to p_frames frames from the ring
// buffer, zero-filling the remainder (and counting a skip) on underrun.
void AudioStreamGeneratorPlayback::_mix_internal(AudioFrame *p_buffer, int p_frames) {
int read_amount = buffer.data_left();
if (p_frames < read_amount) {
read_amount = p_frames;
}
buffer.read(p_buffer, read_amount);
if (read_amount < p_frames) {
//skipped, not ideal
for (int i = read_amount; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0, 0);
}
skips++;
}
// Advance elapsed playback time (seconds).
mixed += p_frames / generator->get_mix_rate();
}
// Source sample rate for the resampler: whatever the generator is set to.
float AudioStreamGeneratorPlayback::get_stream_sampling_rate() {
return generator->get_mix_rate();
}
// Begins playback; p_from_pos is ignored (generated streams cannot seek).
void AudioStreamGeneratorPlayback::start(float p_from_pos) {
// Only prime the resampler on a fresh playback (nothing mixed yet).
if (mixed == 0.0) {
_begin_resample();
}
skips = 0;
active = true;
mixed = 0.0;
}
// Stops playback; the ring buffer contents are left untouched.
void AudioStreamGeneratorPlayback::stop() {
active = false;
}
// True between start() and stop().
bool AudioStreamGeneratorPlayback::is_playing() const {
return active; // (original comment said "always playing"; the flag does toggle)
}
// Generated streams never loop.
int AudioStreamGeneratorPlayback::get_loop_count() const {
return 0;
}
// Seconds of audio mixed since start().
float AudioStreamGeneratorPlayback::get_playback_position() const {
return mixed;
}
// Seeking is meaningless for a user-fed stream; intentionally a no-op.
void AudioStreamGeneratorPlayback::seek(float p_time) {
//no seek possible
}
// Exposes the frame-pushing API to scripting.
void AudioStreamGeneratorPlayback::_bind_methods() {
ClassDB::bind_method(D_METHOD("push_frame", "frame"), &AudioStreamGeneratorPlayback::push_frame);
ClassDB::bind_method(D_METHOD("can_push_buffer", "amount"), &AudioStreamGeneratorPlayback::can_push_buffer);
ClassDB::bind_method(D_METHOD("push_buffer", "frames"), &AudioStreamGeneratorPlayback::push_buffer);
ClassDB::bind_method(D_METHOD("get_frames_available"), &AudioStreamGeneratorPlayback::get_frames_available);
ClassDB::bind_method(D_METHOD("get_skips"), &AudioStreamGeneratorPlayback::get_skips);
ClassDB::bind_method(D_METHOD("clear_buffer"), &AudioStreamGeneratorPlayback::clear_buffer);
}
// Inert until instance_playback() assigns a generator and start() is called.
AudioStreamGeneratorPlayback::AudioStreamGeneratorPlayback() {
generator = NULL;
skips = 0;
active = false;
mixed = 0;
}
#ifndef AUDIO_STREAM_USER_FED_H
#define AUDIO_STREAM_USER_FED_H
#include "core/ring_buffer.h"
#include "servers/audio/audio_stream.h"
// User-fed audio stream: scripts push frames into its playback's ring buffer
// instead of the data coming from a file.
class AudioStreamGenerator : public AudioStream {
GDCLASS(AudioStreamGenerator, AudioStream)
float mix_rate; // sample rate of the pushed data, in Hz
float buffer_len; // playback ring buffer length, in seconds
protected:
static void _bind_methods();
public:
void set_mix_rate(float p_mix_rate);
float get_mix_rate() const;
void set_buffer_length(float p_seconds);
float get_buffer_length() const;
virtual Ref<AudioStreamPlayback> instance_playback();
virtual String get_stream_name() const;
virtual float get_length() const;
AudioStreamGenerator();
};
// Playback side of AudioStreamGenerator: a ring buffer filled by the user and
// drained (with resampling) by the audio mixer.
class AudioStreamGeneratorPlayback : public AudioStreamPlaybackResampled {
GDCLASS(AudioStreamGeneratorPlayback, AudioStreamPlaybackResampled)
friend class AudioStreamGenerator;
RingBuffer<AudioFrame> buffer; // frames pushed but not yet mixed
int skips; // underrun counter
bool active; // true between start() and stop()
float mixed; // seconds mixed since start()
AudioStreamGenerator *generator; // owning stream; set by instance_playback()
protected:
virtual void _mix_internal(AudioFrame *p_buffer, int p_frames);
virtual float get_stream_sampling_rate();
static void _bind_methods();
public:
virtual void start(float p_from_pos = 0.0);
virtual void stop();
virtual bool is_playing() const;
virtual int get_loop_count() const; //times it looped
virtual float get_playback_position() const;
virtual void seek(float p_time);
bool push_frame(const Vector2 &p_frame);
bool can_push_buffer(int p_frames) const;
bool push_buffer(const PoolVector2Array &p_frames);
int get_frames_available() const;
int get_skips() const;
void clear_buffer();
AudioStreamGeneratorPlayback();
};
#endif // AUDIO_STREAM_USER_FED_H
......@@ -876,6 +876,15 @@ int AudioServer::get_bus_effect_count(int p_bus) {
return buses[p_bus]->effects.size();
}
Ref<AudioEffectInstance> AudioServer::get_bus_effect_instance(int p_bus, int p_effect, int p_channel) {
ERR_FAIL_INDEX_V(p_bus, buses.size(), Ref<AudioEffectInstance>());
ERR_FAIL_INDEX_V(p_effect, buses[p_bus]->effects.size(), Ref<AudioEffectInstance>());
ERR_FAIL_INDEX_V(p_channel, buses[p_bus]->channels.size(), Ref<AudioEffectInstance>());