Hi,
Here are a few patches to refactor and clean up the hal-audio code. The
primary goal is to decouple the codec abstraction from any write/sync
operations, so that when a new codec is added it only needs to do the
encoding and does not have to care about writing anything to the socket
or dealing with synchronization.
In addition, the "sleeping code" is changed to use clock_nanosleep()
with an absolute time, which results in an almost perfect distribution
of media packets over time, whereas previously we had to "catch up"
from time to time. The old approach also seemed unreliable over longer
periods, as playback eventually became distorted. That issue now
appears to be gone.
Andrzej Kaczmarek (5):
android/hal-audio: Add open/close_endpoint helpers
android/hal-audio: Add encode_mediapacket function
android/hal-audio: Write and sync in common code
android/hal-audio: Use payload length for codec init
android/hal-audio: Provide better audio synchronization
android/hal-audio.c | 409 ++++++++++++++++++++++++++++------------------------
1 file changed, 220 insertions(+), 189 deletions(-)
--
1.8.5.4
It makes more sense to pass the maximum payload length to the codec,
since this is what is actually used for calculations. The MTU value is
only needed to allocate a properly sized buffer.
---
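To illustrate the relation between the two values (just a sketch, not part
of the patch; frames_per_packet() and hdr_len are illustrative names only):

#include <stddef.h>
#include <stdint.h>

/* Minimal sketch: the MTU only bounds the packet buffer allocation, while
 * calculations such as frames-per-packet work on the payload, i.e. whatever
 * is left after the media packet header.
 */
static size_t frames_per_packet(uint16_t mtu, size_t hdr_len,
					size_t out_frame_len)
{
	size_t payload_len = mtu - hdr_len;

	return payload_len / out_frame_len;
}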
android/hal-audio.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/android/hal-audio.c b/android/hal-audio.c
index b4d78ca..3a7b026 100644
--- a/android/hal-audio.c
+++ b/android/hal-audio.c
@@ -387,11 +387,10 @@ static void sbc_init_encoder(struct sbc_data *sbc_data)
in->min_bitpool, in->max_bitpool);
}
-static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
+static int sbc_codec_init(struct audio_preset *preset, uint16_t payload_len,
void **codec_data)
{
struct sbc_data *sbc_data;
- size_t hdr_len = sizeof(struct media_packet);
size_t in_frame_len;
size_t out_frame_len;
size_t num_frames;
@@ -411,7 +410,7 @@ static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
in_frame_len = sbc_get_codesize(&sbc_data->enc);
out_frame_len = sbc_get_frame_length(&sbc_data->enc);
- num_frames = (mtu - hdr_len) / out_frame_len;
+ num_frames = payload_len / out_frame_len;
sbc_data->in_frame_len = in_frame_len;
sbc_data->in_buf_size = num_frames * in_frame_len;
@@ -421,8 +420,8 @@ static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
sbc_data->frame_duration = sbc_get_frame_duration(&sbc_data->enc);
sbc_data->frames_per_packet = num_frames;
- DBG("mtu=%u in_frame_len=%zu out_frame_len=%zu frames_per_packet=%zu",
- mtu, in_frame_len, out_frame_len, num_frames);
+ DBG("in_frame_len=%zu out_frame_len=%zu frames_per_packet=%zu",
+ in_frame_len, out_frame_len, num_frames);
*codec_data = sbc_data;
@@ -885,6 +884,7 @@ static bool open_endpoint(struct audio_endpoint *ep,
struct audio_preset *preset;
const struct audio_codec *codec;
uint16_t mtu;
+ uint16_t payload_len;
int fd;
if (ipc_open_stream_cmd(ep->id, &mtu, &fd, &preset) !=
@@ -899,10 +899,14 @@ static bool open_endpoint(struct audio_endpoint *ep,
return false;
}
+ DBG("mtu=%u", mtu);
+
+ payload_len = mtu - sizeof(*ep->mp);
+
ep->fd = fd;
codec = ep->codec;
- codec->init(preset, mtu, &ep->codec_data);
+ codec->init(preset, payload_len, &ep->codec_data);
codec->get_config(ep->codec_data, cfg);
ep->mp = calloc(mtu, 1);
@@ -910,7 +914,7 @@ static bool open_endpoint(struct audio_endpoint *ep,
ep->mp->hdr.pt = 1;
ep->mp->hdr.ssrc = htonl(1);
- ep->mp_data_len = mtu - sizeof(*ep->mp);
+ ep->mp_data_len = payload_len;
free(preset);
--
1.8.5.4
There is no need for the codec abstraction to take care of writing and
synchronizing the actual media stream, since this is something the
common code can do regardless of the codec used.
Data is synchronized based on the number of samples consumed from the
input stream instead of the number of frames encoded to the output
stream, which is also more reliable since it is not affected by encoder
errors.
---
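To illustrate the idea (just a sketch; expected_samples() is an illustrative
helper, the actual patch computes this inline in write_media_packet()):

#include <stdint.h>
#include <time.h>

/* Minimal sketch: how many samples should have been consumed by now, given
 * the stream start time and the sample rate. If the samples we actually
 * consumed are ahead of this value, the writer can afford to sleep. Basing
 * this on consumed input samples (rather than on encoded frames) keeps the
 * estimate immune to encoder errors.
 */
static uint32_t expected_samples(const struct timespec *start, uint32_t rate)
{
	struct timespec now;
	int64_t elapsed_us;

	clock_gettime(CLOCK_MONOTONIC, &now);

	elapsed_us = (int64_t) (now.tv_sec - start->tv_sec) * 1000000 +
				(now.tv_nsec - start->tv_nsec) / 1000;

	return elapsed_us * rate / 1000000;
}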
android/hal-audio.c | 188 +++++++++++++++++++++++++---------------------------
1 file changed, 91 insertions(+), 97 deletions(-)
diff --git a/android/hal-audio.c b/android/hal-audio.c
index 78889e5..b4d78ca 100644
--- a/android/hal-audio.c
+++ b/android/hal-audio.c
@@ -130,17 +130,9 @@ struct sbc_data {
size_t in_buf_size;
size_t out_frame_len;
- size_t out_buf_size;
- uint8_t *out_buf;
unsigned frame_duration;
unsigned frames_per_packet;
-
- struct timespec start;
- unsigned frames_sent;
- uint32_t timestamp;
-
- uint16_t seq;
};
static inline void timespec_diff(struct timespec *a, struct timespec *b,
@@ -162,9 +154,9 @@ static int sbc_cleanup(void *codec_data);
static int sbc_get_config(void *codec_data, struct audio_input_config *config);
static size_t sbc_get_buffer_size(void *codec_data);
static size_t sbc_get_mediapacket_duration(void *codec_data);
-static void sbc_resume(void *codec_data);
-static ssize_t sbc_write_data(void *codec_data, const void *buffer,
- size_t bytes, int fd);
+static ssize_t sbc_encode_mediapacket(void *codec_data, const uint8_t *buffer,
+ size_t len, struct media_packet *mp,
+ size_t mp_data_len, size_t *written);
struct audio_codec {
uint8_t type;
@@ -178,9 +170,9 @@ struct audio_codec {
struct audio_input_config *config);
size_t (*get_buffer_size) (void *codec_data);
size_t (*get_mediapacket_duration) (void *codec_data);
- void (*resume) (void *codec_data);
- ssize_t (*write_data) (void *codec_data, const void *buffer,
- size_t bytes, int fd);
+ ssize_t (*encode_mediapacket) (void *codec_data, const uint8_t *buffer,
+ size_t len, struct media_packet *mp,
+ size_t mp_data_len, size_t *written);
};
static const struct audio_codec audio_codecs[] = {
@@ -194,8 +186,7 @@ static const struct audio_codec audio_codecs[] = {
.get_config = sbc_get_config,
.get_buffer_size = sbc_get_buffer_size,
.get_mediapacket_duration = sbc_get_mediapacket_duration,
- .resume = sbc_resume,
- .write_data = sbc_write_data,
+ .encode_mediapacket = sbc_encode_mediapacket,
}
};
@@ -208,6 +199,13 @@ struct audio_endpoint {
const struct audio_codec *codec;
void *codec_data;
int fd;
+
+ struct media_packet *mp;
+ size_t mp_data_len;
+
+ uint16_t seq;
+ uint32_t samples;
+ struct timespec start;
};
static struct audio_endpoint audio_endpoints[MAX_AUDIO_ENDPOINTS];
@@ -419,8 +417,6 @@ static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
sbc_data->in_buf_size = num_frames * in_frame_len;
sbc_data->out_frame_len = out_frame_len;
- sbc_data->out_buf_size = hdr_len + num_frames * out_frame_len;
- sbc_data->out_buf = calloc(1, sbc_data->out_buf_size);
sbc_data->frame_duration = sbc_get_frame_duration(&sbc_data->enc);
sbc_data->frames_per_packet = num_frames;
@@ -438,7 +434,6 @@ static int sbc_cleanup(void *codec_data)
struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
sbc_finish(&sbc_data->enc);
- free(sbc_data->out_buf);
free(codec_data);
return AUDIO_STATUS_SUCCESS;
@@ -486,28 +481,18 @@ static size_t sbc_get_mediapacket_duration(void *codec_data)
return sbc_data->frame_duration * sbc_data->frames_per_packet;
}
-static void sbc_resume(void *codec_data)
-{
- struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
-
- DBG("");
-
- clock_gettime(CLOCK_MONOTONIC, &sbc_data->start);
-
- sbc_data->frames_sent = 0;
- sbc_data->timestamp = 0;
-}
-
-static int write_media_packet(int fd, struct sbc_data *sbc_data,
- struct media_packet *mp, size_t data_len)
+static int write_media_packet(struct a2dp_stream_out *out, size_t mp_data_len,
+ uint32_t input_samples)
{
+ struct audio_endpoint *ep = out->ep;
+ struct media_packet *mp = ep->mp;
struct timespec cur;
struct timespec diff;
- unsigned expected_frames;
+ uint32_t expected_samples;
int ret;
while (true) {
- ret = write(fd, mp, sizeof(*mp) + data_len);
+ ret = write(ep->fd, mp, sizeof(*mp) + mp_data_len);
if (ret >= 0)
break;
@@ -515,12 +500,10 @@ static int write_media_packet(int fd, struct sbc_data *sbc_data,
return -errno;
}
- sbc_data->frames_sent += mp->payload.frame_count;
-
clock_gettime(CLOCK_MONOTONIC, &cur);
- timespec_diff(&cur, &sbc_data->start, &diff);
- expected_frames = (diff.tv_sec * 1000000 + diff.tv_nsec / 1000) /
- sbc_data->frame_duration;
+ timespec_diff(&cur, &ep->start, &diff);
+ expected_samples = (diff.tv_sec * 1000000ll + diff.tv_nsec / 1000ll) *
+ out->cfg.rate / 1000000ll;
/* AudioFlinger does not seem to provide any *working*
* API to provide data in some interval and will just
@@ -531,9 +514,8 @@ static int write_media_packet(int fd, struct sbc_data *sbc_data,
* lagging behind audio stream, we can sleep for
* duration of single media packet.
*/
- if (sbc_data->frames_sent >= expected_frames)
- usleep(sbc_data->frame_duration *
- mp->payload.frame_count);
+ if (ep->samples >= expected_samples)
+ usleep(input_samples * 1000000 / out->cfg.rate);
return ret;
}
@@ -574,53 +556,6 @@ static ssize_t sbc_encode_mediapacket(void *codec_data, const uint8_t *buffer,
return consumed;
}
-static ssize_t sbc_write_data(void *codec_data, const void *buffer,
- size_t bytes, int fd)
-{
- struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
- size_t consumed = 0;
- struct media_packet *mp = (struct media_packet *) sbc_data->out_buf;
- size_t free_space = sbc_data->out_buf_size - sizeof(*mp);
-
- mp->hdr.v = 2;
- mp->hdr.pt = 1;
- mp->hdr.ssrc = htonl(1);
-
- while (consumed < bytes) {
- size_t written = 0;
- ssize_t read;
- int ret;
-
- read = sbc_encode_mediapacket(codec_data, buffer + consumed,
- bytes - consumed, mp,
- free_space,
- &written);
-
- if (read <= 0) {
- error("sbc_encode_mediapacket failed (%zd)", read);
- goto done;
- }
-
- consumed += read;
-
- mp->hdr.sequence_number = htons(sbc_data->seq++);
- mp->hdr.timestamp = htonl(sbc_data->timestamp);
-
- /* AudioFlinger provides PCM 16bit stereo only, thus sample size
- * is always 4 bytes
- */
- sbc_data->timestamp += (read / 4);
-
- ret = write_media_packet(fd, sbc_data, mp, written);
- if (ret < 0)
- return ret;
- }
-
-done:
- /* we always assume that all data was processed and sent */
- return bytes;
-}
-
static int audio_ipc_cmd(uint8_t service_id, uint8_t opcode, uint16_t len,
void *param, size_t *rsp_len, void *rsp, int *fd)
{
@@ -970,6 +905,13 @@ static bool open_endpoint(struct audio_endpoint *ep,
codec->init(preset, mtu, &ep->codec_data);
codec->get_config(ep->codec_data, cfg);
+ ep->mp = calloc(mtu, 1);
+ ep->mp->hdr.v = 2;
+ ep->mp->hdr.pt = 1;
+ ep->mp->hdr.ssrc = htonl(1);
+
+ ep->mp_data_len = mtu - sizeof(*ep->mp);
+
free(preset);
return true;
@@ -983,6 +925,8 @@ static void close_endpoint(struct audio_endpoint *ep)
ep->fd = -1;
}
+ free(ep->mp);
+
ep->codec->cleanup(ep->codec_data);
ep->codec_data = NULL;
}
@@ -1002,10 +946,59 @@ static void downmix_to_mono(struct a2dp_stream_out *out, const uint8_t *buffer,
}
}
+static bool write_data(struct a2dp_stream_out *out, const void *buffer,
+ size_t bytes)
+{
+ struct audio_endpoint *ep = out->ep;
+ struct media_packet *mp = (struct media_packet *) ep->mp;
+ size_t free_space = ep->mp_data_len;
+ size_t consumed = 0;
+
+ while (consumed < bytes) {
+ size_t written = 0;
+ ssize_t read;
+ uint32_t samples;
+ int ret;
+
+ read = ep->codec->encode_mediapacket(ep->codec_data,
+ buffer + consumed,
+ bytes - consumed, mp,
+ free_space, &written);
+
+ /* This is non-fatal and we can just assume buffer was processed
+ * properly and wait for next one.
+ */
+ if (read <= 0)
+ goto done;
+
+ consumed += read;
+
+ mp->hdr.sequence_number = htons(ep->seq++);
+ mp->hdr.timestamp = htonl(ep->samples);
+
+ /* AudioFlinger provides 16bit PCM, so sample size is 2 bytes
+ * multiplied by number of channels. Number of channels is simply
+ * number of bits set in channels mask.
+ */
+ samples = read / (2 * popcount(out->cfg.channels));
+ ep->samples += samples;
+
+ ret = write_media_packet(out, written, samples);
+ if (ret < 0)
+ return false;
+ }
+
+done:
+ return true;
+}
+
+
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
size_t bytes)
{
struct a2dp_stream_out *out = (struct a2dp_stream_out *) stream;
+ const void *in_buf = buffer;
+ size_t in_len = bytes;
/* just return in case we're closing */
if (out->audio_state == AUDIO_A2DP_STATE_NONE)
@@ -1018,7 +1011,8 @@ static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
if (ipc_resume_stream_cmd(out->ep->id) != AUDIO_STATUS_SUCCESS)
return -1;
- out->ep->codec->resume(out->ep->codec_data);
+ clock_gettime(CLOCK_MONOTONIC, &out->ep->start);
+ out->ep->samples = 0;
out->audio_state = AUDIO_A2DP_STATE_STARTED;
}
@@ -1048,14 +1042,14 @@ static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
downmix_to_mono(out, buffer, bytes);
- return out->ep->codec->write_data(out->ep->codec_data,
- out->downmix_buf,
- bytes / 2,
- out->ep->fd) * 2;
+ in_buf = out->downmix_buf;
+ in_len = bytes / 2;
}
- return out->ep->codec->write_data(out->ep->codec_data, buffer,
- bytes, out->ep->fd);
+ if (!write_data(out, in_buf, in_len))
+ return -1;
+
+ return bytes;
}
static uint32_t out_get_sample_rate(const struct audio_stream *stream)
--
1.8.5.4
---
android/hal-audio.c | 112 +++++++++++++++++++++++++++++-----------------------
1 file changed, 63 insertions(+), 49 deletions(-)
diff --git a/android/hal-audio.c b/android/hal-audio.c
index e1f3f0d..36e8d2e 100644
--- a/android/hal-audio.c
+++ b/android/hal-audio.c
@@ -914,6 +914,67 @@ static void unregister_endpoints(void)
}
}
+static int set_blocking(int fd)
+{
+ int flags;
+
+ flags = fcntl(fd, F_GETFL, 0);
+ if (flags < 0) {
+ error("fcntl(F_GETFL): %s (%d)", strerror(errno), errno);
+ return -errno;
+ }
+
+ if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) < 0) {
+ error("fcntl(F_SETFL): %s (%d)", strerror(errno), errno);
+ return -errno;
+ }
+
+ return 0;
+}
+
+static bool open_endpoint(struct audio_endpoint *ep,
+ struct audio_input_config *cfg)
+{
+ struct audio_preset *preset;
+ const struct audio_codec *codec;
+ uint16_t mtu;
+ int fd;
+
+ if (ipc_open_stream_cmd(ep->id, &mtu, &fd, &preset) !=
+ AUDIO_STATUS_SUCCESS)
+ return false;
+
+ if (!preset || fd < 0)
+ return false;
+
+ if (set_blocking(fd) < 0) {
+ free(preset);
+ return false;
+ }
+
+ ep->fd = fd;
+
+ codec = ep->codec;
+ codec->init(preset, mtu, &ep->codec_data);
+ codec->get_config(ep->codec_data, cfg);
+
+ free(preset);
+
+ return true;
+}
+
+static void close_endpoint(struct audio_endpoint *ep)
+{
+ ipc_close_stream_cmd(ep->id);
+ if (ep->fd >= 0) {
+ close(ep->fd);
+ ep->fd = -1;
+ }
+
+ ep->codec->cleanup(ep->codec_data);
+ ep->codec_data = NULL;
+}
+
static void downmix_to_mono(struct a2dp_stream_out *out, const uint8_t *buffer,
size_t bytes)
{
@@ -1260,24 +1321,6 @@ static int in_remove_audio_effect(const struct audio_stream *stream,
return -ENOSYS;
}
-static int set_blocking(int fd)
-{
- int flags;
-
- flags = fcntl(fd, F_GETFL, 0);
- if (flags < 0) {
- error("fcntl(F_GETFL): %s (%d)", strerror(errno), errno);
- return -errno;
- }
-
- if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) < 0) {
- error("fcntl(F_SETFL): %s (%d)", strerror(errno), errno);
- return -errno;
- }
-
- return 0;
-}
-
static int audio_open_output_stream(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
@@ -1288,10 +1331,6 @@ static int audio_open_output_stream(struct audio_hw_device *dev,
{
struct a2dp_audio_dev *a2dp_dev = (struct a2dp_audio_dev *) dev;
struct a2dp_stream_out *out;
- struct audio_preset *preset;
- const struct audio_codec *codec;
- uint16_t mtu;
- int fd;
out = calloc(1, sizeof(struct a2dp_stream_out));
if (!out)
@@ -1319,29 +1358,12 @@ static int audio_open_output_stream(struct audio_hw_device *dev,
/* TODO: for now we always use endpoint 0 */
out->ep = &audio_endpoints[0];
- if (ipc_open_stream_cmd(out->ep->id, &mtu, &fd, &preset) !=
- AUDIO_STATUS_SUCCESS)
- goto fail;
-
- if (!preset || fd < 0)
- goto fail;
-
- if (set_blocking(fd) < 0) {
- free(preset);
+ if (!open_endpoint(out->ep, &out->cfg))
goto fail;
- }
-
- out->ep->fd = fd;
- codec = out->ep->codec;
-
- codec->init(preset, mtu, &out->ep->codec_data);
- codec->get_config(out->ep->codec_data, &out->cfg);
DBG("rate=%d channels=%d format=%d", out->cfg.rate,
out->cfg.channels, out->cfg.format);
- free(preset);
-
if (out->cfg.channels == AUDIO_CHANNEL_OUT_MONO) {
out->downmix_buf = malloc(FIXED_BUFFER_SIZE / 2);
if (!out->downmix_buf)
@@ -1367,18 +1389,10 @@ static void audio_close_output_stream(struct audio_hw_device *dev,
{
struct a2dp_audio_dev *a2dp_dev = (struct a2dp_audio_dev *) dev;
struct a2dp_stream_out *out = (struct a2dp_stream_out *) stream;
- struct audio_endpoint *ep = a2dp_dev->out->ep;
DBG("");
- ipc_close_stream_cmd(ep->id);
- if (ep->fd >= 0) {
- close(ep->fd);
- ep->fd = -1;
- }
-
- ep->codec->cleanup(ep->codec_data);
- ep->codec_data = NULL;
+ close_endpoint(a2dp_dev->out->ep);
free(out->downmix_buf);
--
1.8.5.4
Instead of waiting a fixed number of microseconds before the next media
packet is encoded, we now use absolute time to wait precisely until the
moment the next packet is expected to be produced. This greatly improves
the flow, as it can compensate for the time spent on encoding and
writing the data.
---
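To illustrate the approach (just a sketch; wait_for_slot() is an illustrative
helper, the actual patch folds this into write_data() using timespec_add()):

#include <errno.h>
#include <stdint.h>
#include <time.h>

/* Minimal sketch: the deadline for the next packet is derived from the number
 * of samples already queued, relative to the fixed stream start time, so time
 * spent on encoding and writing does not accumulate as drift.
 */
static int wait_for_slot(const struct timespec *start, uint32_t samples,
								uint32_t rate)
{
	uint64_t offset_us = (uint64_t) samples * 1000000 / rate;
	struct timespec deadline;
	int ret;

	deadline.tv_sec = start->tv_sec + offset_us / 1000000;
	deadline.tv_nsec = start->tv_nsec + (offset_us % 1000000) * 1000;
	if (deadline.tv_nsec >= 1000000000) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000;
	}

	do {
		ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
							&deadline, NULL);
	} while (ret == EINTR);

	return ret;
}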
android/hal-audio.c | 91 ++++++++++++++++++++++++++++-------------------------
1 file changed, 49 insertions(+), 42 deletions(-)
diff --git a/android/hal-audio.c b/android/hal-audio.c
index 3a7b026..3ef058e 100644
--- a/android/hal-audio.c
+++ b/android/hal-audio.c
@@ -147,6 +147,27 @@ static inline void timespec_diff(struct timespec *a, struct timespec *b,
}
}
+static void timespec_add(struct timespec *base, uint64_t time_us,
+ struct timespec *res)
+{
+ res->tv_sec = base->tv_sec + time_us / 1000000;
+ res->tv_nsec = base->tv_nsec + (time_us % 1000000) * 1000;
+
+ if (res->tv_nsec >= 1000000000) {
+ res->tv_sec++;
+ res->tv_nsec -= 1000000000;
+ }
+}
+
+#if defined(ANDROID)
+/* Bionic does not have clock_nanosleep() prototype in time.h even though
+ * it provides its implementation.
+ */
+extern int clock_nanosleep(clockid_t clock_id, int flags,
+ const struct timespec *request,
+ struct timespec *remain);
+#endif
+
static int sbc_get_presets(struct audio_preset *preset, size_t *len);
static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
void **codec_data);
@@ -480,45 +501,6 @@ static size_t sbc_get_mediapacket_duration(void *codec_data)
return sbc_data->frame_duration * sbc_data->frames_per_packet;
}
-static int write_media_packet(struct a2dp_stream_out *out, size_t mp_data_len,
- uint32_t input_samples)
-{
- struct audio_endpoint *ep = out->ep;
- struct media_packet *mp = ep->mp;
- struct timespec cur;
- struct timespec diff;
- uint32_t expected_samples;
- int ret;
-
- while (true) {
- ret = write(ep->fd, mp, sizeof(*mp) + mp_data_len);
- if (ret >= 0)
- break;
-
- if (errno != EINTR)
- return -errno;
- }
-
- clock_gettime(CLOCK_MONOTONIC, &cur);
- timespec_diff(&cur, &ep->start, &diff);
- expected_samples = (diff.tv_sec * 1000000ll + diff.tv_nsec / 1000ll) *
- out->cfg.rate / 1000000ll;
-
- /* AudioFlinger does not seem to provide any *working*
- * API to provide data in some interval and will just
- * send another buffer as soon as we process current
- * one. To prevent overflowing L2CAP socket, we need to
- * introduce some artificial delay here base on how many
- * audio frames were sent so far, i.e. if we're not
- * lagging behind audio stream, we can sleep for
- * duration of single media packet.
- */
- if (ep->samples >= expected_samples)
- usleep(input_samples * 1000000 / out->cfg.rate);
-
- return ret;
-}
-
static ssize_t sbc_encode_mediapacket(void *codec_data, const uint8_t *buffer,
size_t len, struct media_packet *mp,
size_t mp_data_len, size_t *written)
@@ -963,6 +945,25 @@ static bool write_data(struct a2dp_stream_out *out, const void *buffer,
ssize_t read;
uint32_t samples;
int ret;
+ uint64_t time_us;
+ struct timespec anchor;
+
+ time_us = ep->samples * 1000000ll / out->cfg.rate;
+
+ timespec_add(&ep->start, time_us, &anchor);
+
+ while (true) {
+ ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
+ &anchor, NULL);
+
+ if (!ret)
+ break;
+
+ if (ret != EINTR) {
+ error("clock_nanosleep failed (%d)", ret);
+ return false;
+ }
+ }
read = ep->codec->encode_mediapacket(ep->codec_data,
buffer + consumed,
@@ -987,9 +988,15 @@ static bool write_data(struct a2dp_stream_out *out, const void *buffer,
samples = read / (2 * popcount(out->cfg.channels));
ep->samples += samples;
- ret = write_media_packet(out, written, samples);
- if (ret < 0)
- return false;
+ while (true) {
+ ret = write(ep->fd, mp, sizeof(*mp) + written);
+
+ if (ret >= 0)
+ break;
+
+ if (errno != EINTR)
+ return false;
+ }
}
done:
--
1.8.5.4
This patch moves the code which encodes data for a media packet into a
single function. It simply tries to put as many encoded frames as will
fit into the provided media packet buffer. This is the first step
towards making the common code responsible for writing and synchronizing
the media stream, instead of the codec abstraction code which handles
this now.
---
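Paraphrasing the contract of the new callback (not the actual patch hunk,
just a restatement of its semantics):

/* - consume as many complete input frames from 'buffer' (len bytes of PCM)
 *   as fit into mp->data, limited by mp_data_len and MAX_FRAMES_IN_PAYLOAD,
 * - store the number of encoded bytes in *written and set
 *   mp->payload.frame_count accordingly,
 * - return the number of input bytes consumed, or a negative value on error.
 */
ssize_t encode_mediapacket(void *codec_data, const uint8_t *buffer, size_t len,
				struct media_packet *mp, size_t mp_data_len,
				size_t *written);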
android/hal-audio.c | 102 +++++++++++++++++++++++++++++-----------------------
1 file changed, 57 insertions(+), 45 deletions(-)
diff --git a/android/hal-audio.c b/android/hal-audio.c
index 36e8d2e..78889e5 100644
--- a/android/hal-audio.c
+++ b/android/hal-audio.c
@@ -129,6 +129,7 @@ struct sbc_data {
size_t in_frame_len;
size_t in_buf_size;
+ size_t out_frame_len;
size_t out_buf_size;
uint8_t *out_buf;
@@ -417,6 +418,7 @@ static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
sbc_data->in_frame_len = in_frame_len;
sbc_data->in_buf_size = num_frames * in_frame_len;
+ sbc_data->out_frame_len = out_frame_len;
sbc_data->out_buf_size = hdr_len + num_frames * out_frame_len;
sbc_data->out_buf = calloc(1, sbc_data->out_buf_size);
@@ -536,75 +538,85 @@ static int write_media_packet(int fd, struct sbc_data *sbc_data,
return ret;
}
+static ssize_t sbc_encode_mediapacket(void *codec_data, const uint8_t *buffer,
+ size_t len, struct media_packet *mp,
+ size_t mp_data_len, size_t *written)
+{
+ struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
+ size_t consumed = 0;
+ size_t encoded = 0;
+ uint8_t frame_count = 0;
+
+ while (len - consumed >= sbc_data->in_frame_len &&
+ mp_data_len - encoded >= sbc_data->out_frame_len &&
+ frame_count < MAX_FRAMES_IN_PAYLOAD) {
+ ssize_t read;
+ ssize_t written = 0;
+
+ read = sbc_encode(&sbc_data->enc, buffer + consumed,
+ sbc_data->in_frame_len, mp->data + encoded,
+ mp_data_len - encoded, &written);
+
+ if (read < 0) {
+ error("SBC: failed to encode block at frame %d (%zd)",
+ frame_count, read);
+ break;
+ }
+
+ frame_count++;
+ consumed += read;
+ encoded += written;
+ }
+
+ *written = encoded;
+ mp->payload.frame_count = frame_count;
+
+ return consumed;
+}
+
static ssize_t sbc_write_data(void *codec_data, const void *buffer,
size_t bytes, int fd)
{
struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
size_t consumed = 0;
- size_t encoded = 0;
struct media_packet *mp = (struct media_packet *) sbc_data->out_buf;
size_t free_space = sbc_data->out_buf_size - sizeof(*mp);
- int ret;
- ssize_t bytes_read;
mp->hdr.v = 2;
mp->hdr.pt = 1;
mp->hdr.ssrc = htonl(1);
- mp->hdr.timestamp = htonl(sbc_data->timestamp);
- mp->payload.frame_count = 0;
- while (bytes - consumed >= sbc_data->in_frame_len) {
- ssize_t written = 0;
+ while (consumed < bytes) {
+ size_t written = 0;
+ ssize_t read;
+ int ret;
- bytes_read = sbc_encode(&sbc_data->enc, buffer + consumed,
- sbc_data->in_frame_len,
- mp->data + encoded, free_space,
- &written);
+ read = sbc_encode_mediapacket(codec_data, buffer + consumed,
+ bytes - consumed, mp,
+ free_space,
+ &written);
- if (bytes_read < 0) {
- error("SBC: failed to encode block (%zd)", bytes_read);
- break;
+ if (read <= 0) {
+ error("sbc_encode_mediapacket failed (%zd)", read);
+ goto done;
}
- mp->payload.frame_count++;
+ consumed += read;
- consumed += bytes_read;
- encoded += written;
- free_space -= written;
+ mp->hdr.sequence_number = htons(sbc_data->seq++);
+ mp->hdr.timestamp = htonl(sbc_data->timestamp);
/* AudioFlinger provides PCM 16bit stereo only, thus sample size
* is always 4 bytes
*/
- sbc_data->timestamp += (bytes_read / 4);
+ sbc_data->timestamp += (read / 4);
- /* write data if we either filled media packed or encoded all
- * input data
- */
- if (mp->payload.frame_count == sbc_data->frames_per_packet ||
- bytes == consumed ||
- mp->payload.frame_count ==
- MAX_FRAMES_IN_PAYLOAD) {
- mp->hdr.sequence_number = htons(sbc_data->seq++);
-
- ret = write_media_packet(fd, sbc_data, mp, encoded);
- if (ret < 0)
- return ret;
-
- encoded = 0;
- free_space = sbc_data->out_buf_size - sizeof(*mp);
- mp->hdr.timestamp = htonl(sbc_data->timestamp);
- mp->payload.frame_count = 0;
- }
- }
-
- if (consumed != bytes) {
- /* we should encode all input data
- * if we did not, something went wrong but we can't really
- * handle this so this is just sanity check
- */
- error("SBC: failed to encode complete input buffer");
+ ret = write_media_packet(fd, sbc_data, mp, written);
+ if (ret < 0)
+ return ret;
}
+done:
/* we always assume that all data was processed and sent */
return bytes;
}
--
1.8.5.4
Hi Szymon,
On 1 March 2014 12:51, Szymon Janc <[email protected]> wrote:
> Hi Andrzej,
>
> On Friday 28 of February 2014 19:01:16 Andrzej Kaczmarek wrote:
<snip>
>> + }
>> +
>> + if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) < 0) {
>> + error("fcntl(F_SETFL): %s (%d)", strerror(errno), errno);
>> + return -errno;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static bool open_endpoint(struct audio_endpoint *ep,
>> + struct audio_input_config *cfg)
>> +{
>> + struct audio_preset *preset;
>> + const struct audio_codec *codec;
>> + uint16_t mtu;
>> + int fd;
>> +
>> + if (ipc_open_stream_cmd(ep->id, &mtu, &fd, &preset) !=
>> + AUDIO_STATUS_SUCCESS)
>> + return false;
>> +
>> + if (!preset || fd < 0)
>
> If this can happen even though ipc_open_stream_cmd() returned success, then
> freeing preset or closing fd should be handled here.
I'll remove this check since ipc_open_stream_cmd guarantees that both
preset and fd are valid on success.
BR,
Andrzej
Hi Andrzej,
On Friday 28 of February 2014 19:01:18 Andrzej Kaczmarek wrote:
> There is no need for the codec abstraction to take care of writing and
> synchronizing the actual media stream, since this is something the
> common code can do regardless of the codec used.
>
> Data is synchronized based on the number of samples consumed from the
> input stream instead of the number of frames encoded to the output
> stream, which is also more reliable since it is not affected by encoder
> errors.
> ---
> android/hal-audio.c | 188 +++++++++++++++++++++++++---------------------------
> 1 file changed, 91 insertions(+), 97 deletions(-)
>
> diff --git a/android/hal-audio.c b/android/hal-audio.c
> index 78889e5..b4d78ca 100644
> --- a/android/hal-audio.c
> +++ b/android/hal-audio.c
> @@ -130,17 +130,9 @@ struct sbc_data {
> size_t in_buf_size;
>
> size_t out_frame_len;
> - size_t out_buf_size;
> - uint8_t *out_buf;
>
> unsigned frame_duration;
> unsigned frames_per_packet;
> -
> - struct timespec start;
> - unsigned frames_sent;
> - uint32_t timestamp;
> -
> - uint16_t seq;
> };
>
> static inline void timespec_diff(struct timespec *a, struct timespec *b,
> @@ -162,9 +154,9 @@ static int sbc_cleanup(void *codec_data);
> static int sbc_get_config(void *codec_data, struct audio_input_config *config);
> static size_t sbc_get_buffer_size(void *codec_data);
> static size_t sbc_get_mediapacket_duration(void *codec_data);
> -static void sbc_resume(void *codec_data);
> -static ssize_t sbc_write_data(void *codec_data, const void *buffer,
> - size_t bytes, int fd);
> +static ssize_t sbc_encode_mediapacket(void *codec_data, const uint8_t *buffer,
> + size_t len, struct media_packet *mp,
> + size_t mp_data_len, size_t *written);
>
> struct audio_codec {
> uint8_t type;
> @@ -178,9 +170,9 @@ struct audio_codec {
> struct audio_input_config *config);
> size_t (*get_buffer_size) (void *codec_data);
> size_t (*get_mediapacket_duration) (void *codec_data);
> - void (*resume) (void *codec_data);
> - ssize_t (*write_data) (void *codec_data, const void *buffer,
> - size_t bytes, int fd);
> + ssize_t (*encode_mediapacket) (void *codec_data, const uint8_t *buffer,
> + size_t len, struct media_packet *mp,
> + size_t mp_data_len, size_t *written);
> };
>
> static const struct audio_codec audio_codecs[] = {
> @@ -194,8 +186,7 @@ static const struct audio_codec audio_codecs[] = {
> .get_config = sbc_get_config,
> .get_buffer_size = sbc_get_buffer_size,
> .get_mediapacket_duration = sbc_get_mediapacket_duration,
> - .resume = sbc_resume,
> - .write_data = sbc_write_data,
> + .encode_mediapacket = sbc_encode_mediapacket,
> }
> };
>
> @@ -208,6 +199,13 @@ struct audio_endpoint {
> const struct audio_codec *codec;
> void *codec_data;
> int fd;
> +
> + struct media_packet *mp;
> + size_t mp_data_len;
> +
> + uint16_t seq;
> + uint32_t samples;
> + struct timespec start;
> };
>
> static struct audio_endpoint audio_endpoints[MAX_AUDIO_ENDPOINTS];
> @@ -419,8 +417,6 @@ static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
> sbc_data->in_buf_size = num_frames * in_frame_len;
>
> sbc_data->out_frame_len = out_frame_len;
> - sbc_data->out_buf_size = hdr_len + num_frames * out_frame_len;
> - sbc_data->out_buf = calloc(1, sbc_data->out_buf_size);
>
> sbc_data->frame_duration = sbc_get_frame_duration(&sbc_data->enc);
> sbc_data->frames_per_packet = num_frames;
> @@ -438,7 +434,6 @@ static int sbc_cleanup(void *codec_data)
> struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
>
> sbc_finish(&sbc_data->enc);
> - free(sbc_data->out_buf);
> free(codec_data);
>
> return AUDIO_STATUS_SUCCESS;
> @@ -486,28 +481,18 @@ static size_t sbc_get_mediapacket_duration(void *codec_data)
> return sbc_data->frame_duration * sbc_data->frames_per_packet;
> }
>
> -static void sbc_resume(void *codec_data)
> -{
> - struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
> -
> - DBG("");
> -
> - clock_gettime(CLOCK_MONOTONIC, &sbc_data->start);
> -
> - sbc_data->frames_sent = 0;
> - sbc_data->timestamp = 0;
> -}
> -
> -static int write_media_packet(int fd, struct sbc_data *sbc_data,
> - struct media_packet *mp, size_t data_len)
> +static int write_media_packet(struct a2dp_stream_out *out, size_t mp_data_len,
> + uint32_t input_samples)
> {
> + struct audio_endpoint *ep = out->ep;
> + struct media_packet *mp = ep->mp;
> struct timespec cur;
> struct timespec diff;
> - unsigned expected_frames;
> + uint32_t expected_samples;
> int ret;
>
> while (true) {
> - ret = write(fd, mp, sizeof(*mp) + data_len);
> + ret = write(ep->fd, mp, sizeof(*mp) + mp_data_len);
> if (ret >= 0)
> break;
>
> @@ -515,12 +500,10 @@ static int write_media_packet(int fd, struct sbc_data *sbc_data,
> return -errno;
> }
>
> - sbc_data->frames_sent += mp->payload.frame_count;
> -
> clock_gettime(CLOCK_MONOTONIC, &cur);
> - timespec_diff(&cur, &sbc_data->start, &diff);
> - expected_frames = (diff.tv_sec * 1000000 + diff.tv_nsec / 1000) /
> - sbc_data->frame_duration;
> + timespec_diff(&cur, &ep->start, &diff);
> + expected_samples = (diff.tv_sec * 1000000ll + diff.tv_nsec / 1000ll) *
> + out->cfg.rate / 1000000ll;
>
> /* AudioFlinger does not seem to provide any *working*
> * API to provide data in some interval and will just
> @@ -531,9 +514,8 @@ static int write_media_packet(int fd, struct sbc_data *sbc_data,
> * lagging behind audio stream, we can sleep for
> * duration of single media packet.
> */
> - if (sbc_data->frames_sent >= expected_frames)
> - usleep(sbc_data->frame_duration *
> - mp->payload.frame_count);
> + if (ep->samples >= expected_samples)
> + usleep(input_samples * 1000000 / out->cfg.rate);
>
> return ret;
> }
> @@ -574,53 +556,6 @@ static ssize_t sbc_encode_mediapacket(void *codec_data, const uint8_t *buffer,
> return consumed;
> }
>
> -static ssize_t sbc_write_data(void *codec_data, const void *buffer,
> - size_t bytes, int fd)
> -{
> - struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
> - size_t consumed = 0;
> - struct media_packet *mp = (struct media_packet *) sbc_data->out_buf;
> - size_t free_space = sbc_data->out_buf_size - sizeof(*mp);
> -
> - mp->hdr.v = 2;
> - mp->hdr.pt = 1;
> - mp->hdr.ssrc = htonl(1);
> -
> - while (consumed < bytes) {
> - size_t written = 0;
> - ssize_t read;
> - int ret;
> -
> - read = sbc_encode_mediapacket(codec_data, buffer + consumed,
> - bytes - consumed, mp,
> - free_space,
> - &written);
> -
> - if (read <= 0) {
> - error("sbc_encode_mediapacket failed (%zd)", read);
> - goto done;
> - }
> -
> - consumed += read;
> -
> - mp->hdr.sequence_number = htons(sbc_data->seq++);
> - mp->hdr.timestamp = htonl(sbc_data->timestamp);
> -
> - /* AudioFlinger provides PCM 16bit stereo only, thus sample size
> - * is always 4 bytes
> - */
> - sbc_data->timestamp += (read / 4);
> -
> - ret = write_media_packet(fd, sbc_data, mp, written);
> - if (ret < 0)
> - return ret;
> - }
> -
> -done:
> - /* we always assume that all data was processed and sent */
> - return bytes;
> -}
> -
> static int audio_ipc_cmd(uint8_t service_id, uint8_t opcode, uint16_t len,
> void *param, size_t *rsp_len, void *rsp, int *fd)
> {
> @@ -970,6 +905,13 @@ static bool open_endpoint(struct audio_endpoint *ep,
> codec->init(preset, mtu, &ep->codec_data);
> codec->get_config(ep->codec_data, cfg);
>
> + ep->mp = calloc(mtu, 1);
> + ep->mp->hdr.v = 2;
> + ep->mp->hdr.pt = 1;
> + ep->mp->hdr.ssrc = htonl(1);
> +
> + ep->mp_data_len = mtu - sizeof(*ep->mp);
> +
> free(preset);
>
> return true;
> @@ -983,6 +925,8 @@ static void close_endpoint(struct audio_endpoint *ep)
> ep->fd = -1;
> }
>
> + free(ep->mp);
> +
> ep->codec->cleanup(ep->codec_data);
> ep->codec_data = NULL;
> }
> @@ -1002,10 +946,59 @@ static void downmix_to_mono(struct a2dp_stream_out *out, const uint8_t *buffer,
> }
> }
>
> +static bool write_data(struct a2dp_stream_out *out, const void *buffer,
> + size_t bytes)
> +{
> + struct audio_endpoint *ep = out->ep;
> + struct media_packet *mp = (struct media_packet *) ep->mp;
> + size_t free_space = ep->mp_data_len;
> + size_t consumed = 0;
> +
> + while (consumed < bytes) {
> + size_t written = 0;
> + ssize_t read;
> + uint32_t samples;
> + int ret;
> +
> + read = ep->codec->encode_mediapacket(ep->codec_data,
> + buffer + consumed,
> + bytes - consumed, mp,
> + free_space, &written);
> +
> + /* This is non-fatal and we can just assume buffer was processed
> + * properly and wait for next one.
> + */
> + if (read <= 0)
> + goto done;
I'd just return true here.
> +
> + consumed += read;
> +
> + mp->hdr.sequence_number = htons(ep->seq++);
> + mp->hdr.timestamp = htonl(ep->samples);
> +
> + /* AudioFlinger provides 16bit PCM, so sample size is 2 bytes
> + * multiplied by number of channels. Number of channels is simply
> + * number of bits set in channels mask.
> + */
> + samples = read / (2 * popcount(out->cfg.channels));
> + ep->samples += samples;
> +
> + ret = write_media_packet(out, written, samples);
> + if (ret < 0)
> + return false;
> + }
> +
> +done:
> + return true;
> +}
> +
> +
minor: double empty line here.
> static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
> size_t bytes)
> {
> struct a2dp_stream_out *out = (struct a2dp_stream_out *) stream;
> + const void *in_buf = buffer;
> + size_t in_len = bytes;
>
> /* just return in case we're closing */
> if (out->audio_state == AUDIO_A2DP_STATE_NONE)
> @@ -1018,7 +1011,8 @@ static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
> if (ipc_resume_stream_cmd(out->ep->id) != AUDIO_STATUS_SUCCESS)
> return -1;
>
> - out->ep->codec->resume(out->ep->codec_data);
> + clock_gettime(CLOCK_MONOTONIC, &out->ep->start);
> + out->ep->samples = 0;
>
> out->audio_state = AUDIO_A2DP_STATE_STARTED;
> }
> @@ -1048,14 +1042,14 @@ static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
>
> downmix_to_mono(out, buffer, bytes);
>
> - return out->ep->codec->write_data(out->ep->codec_data,
> - out->downmix_buf,
> - bytes / 2,
> - out->ep->fd) * 2;
> + in_buf = out->downmix_buf;
> + in_len = bytes / 2;
> }
>
> - return out->ep->codec->write_data(out->ep->codec_data, buffer,
> - bytes, out->ep->fd);
> + if (!write_data(out, in_buf, in_len))
> + return -1;
> +
> + return bytes;
> }
>
> static uint32_t out_get_sample_rate(const struct audio_stream *stream)
--
BR
Szymon Janc
Hi Andrzej,
On Friday 28 of February 2014 19:01:16 Andrzej Kaczmarek wrote:
> ---
> android/hal-audio.c | 112 +++++++++++++++++++++++++++++-----------------------
> 1 file changed, 63 insertions(+), 49 deletions(-)
>
> diff --git a/android/hal-audio.c b/android/hal-audio.c
> index e1f3f0d..36e8d2e 100644
> --- a/android/hal-audio.c
> +++ b/android/hal-audio.c
> @@ -914,6 +914,67 @@ static void unregister_endpoints(void)
> }
> }
>
> +static int set_blocking(int fd)
> +{
> + int flags;
> +
> + flags = fcntl(fd, F_GETFL, 0);
> + if (flags < 0) {
> + error("fcntl(F_GETFL): %s (%d)", strerror(errno), errno);
> + return -errno;
I'm aware that this function is just moved up, but noting it so we don't
forget :) error() can modify errno, so errno should be saved before the
call (this needs fixing in multiple places).
Also, strerror() is not thread safe; we should probably start using
strerror_r() (e.g. have wrapping macros that we could use in
multithreaded code). The BT HAL has the same problem, so I guess this
can be done outside of this patchset.
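Something along these lines perhaps (just a rough sketch, not tested; it
reuses error() from hal-audio.c and assumes the XSI variant of strerror_r()):

static int set_blocking(int fd)
{
	char buf[64];
	int flags, err;

	flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0) {
		/* save errno before error(), which may clobber it */
		err = errno;
		strerror_r(err, buf, sizeof(buf));
		error("fcntl(F_GETFL): %s (%d)", buf, err);
		return -err;
	}

	if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) < 0) {
		err = errno;
		strerror_r(err, buf, sizeof(buf));
		error("fcntl(F_SETFL): %s (%d)", buf, err);
		return -err;
	}

	return 0;
}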
> + }
> +
> + if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) < 0) {
> + error("fcntl(F_SETFL): %s (%d)", strerror(errno), errno);
> + return -errno;
> + }
> +
> + return 0;
> +}
> +
> +static bool open_endpoint(struct audio_endpoint *ep,
> + struct audio_input_config *cfg)
> +{
> + struct audio_preset *preset;
> + const struct audio_codec *codec;
> + uint16_t mtu;
> + int fd;
> +
> + if (ipc_open_stream_cmd(ep->id, &mtu, &fd, &preset) !=
> + AUDIO_STATUS_SUCCESS)
> + return false;
> +
> + if (!preset || fd < 0)
If this can happen even though ipc_open_stream_cmd() returned success, then
freeing preset or closing fd should be handled here.
> + return false;
> +
> + if (set_blocking(fd) < 0) {
> + free(preset);
Shouldn't fd be closed here?
> + return false;
> + }
> +
> + ep->fd = fd;
> +
> + codec = ep->codec;
> + codec->init(preset, mtu, &ep->codec_data);
> + codec->get_config(ep->codec_data, cfg);
> +
> + free(preset);
> +
> + return true;
> +}
> +
> +static void close_endpoint(struct audio_endpoint *ep)
> +{
> + ipc_close_stream_cmd(ep->id);
> + if (ep->fd >= 0) {
> + close(ep->fd);
> + ep->fd = -1;
> + }
> +
> + ep->codec->cleanup(ep->codec_data);
> + ep->codec_data = NULL;
> +}
> +
> static void downmix_to_mono(struct a2dp_stream_out *out, const uint8_t *buffer,
> size_t bytes)
> {
> @@ -1260,24 +1321,6 @@ static int in_remove_audio_effect(const struct audio_stream *stream,
> return -ENOSYS;
> }
>
> -static int set_blocking(int fd)
> -{
> - int flags;
> -
> - flags = fcntl(fd, F_GETFL, 0);
> - if (flags < 0) {
> - error("fcntl(F_GETFL): %s (%d)", strerror(errno), errno);
> - return -errno;
> - }
> -
> - if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) < 0) {
> - error("fcntl(F_SETFL): %s (%d)", strerror(errno), errno);
> - return -errno;
> - }
> -
> - return 0;
> -}
> -
> static int audio_open_output_stream(struct audio_hw_device *dev,
> audio_io_handle_t handle,
> audio_devices_t devices,
> @@ -1288,10 +1331,6 @@ static int audio_open_output_stream(struct audio_hw_device *dev,
> {
> struct a2dp_audio_dev *a2dp_dev = (struct a2dp_audio_dev *) dev;
> struct a2dp_stream_out *out;
> - struct audio_preset *preset;
> - const struct audio_codec *codec;
> - uint16_t mtu;
> - int fd;
>
> out = calloc(1, sizeof(struct a2dp_stream_out));
> if (!out)
> @@ -1319,29 +1358,12 @@ static int audio_open_output_stream(struct audio_hw_device *dev,
> /* TODO: for now we always use endpoint 0 */
> out->ep = &audio_endpoints[0];
>
> - if (ipc_open_stream_cmd(out->ep->id, &mtu, &fd, &preset) !=
> - AUDIO_STATUS_SUCCESS)
> - goto fail;
> -
> - if (!preset || fd < 0)
> - goto fail;
> -
> - if (set_blocking(fd) < 0) {
> - free(preset);
> + if (!open_endpoint(out->ep, &out->cfg))
> goto fail;
> - }
> -
> - out->ep->fd = fd;
> - codec = out->ep->codec;
> -
> - codec->init(preset, mtu, &out->ep->codec_data);
> - codec->get_config(out->ep->codec_data, &out->cfg);
>
> DBG("rate=%d channels=%d format=%d", out->cfg.rate,
> out->cfg.channels, out->cfg.format);
>
> - free(preset);
> -
> if (out->cfg.channels == AUDIO_CHANNEL_OUT_MONO) {
> out->downmix_buf = malloc(FIXED_BUFFER_SIZE / 2);
> if (!out->downmix_buf)
> @@ -1367,18 +1389,10 @@ static void audio_close_output_stream(struct audio_hw_device *dev,
> {
> struct a2dp_audio_dev *a2dp_dev = (struct a2dp_audio_dev *) dev;
> struct a2dp_stream_out *out = (struct a2dp_stream_out *) stream;
> - struct audio_endpoint *ep = a2dp_dev->out->ep;
>
> DBG("");
>
> - ipc_close_stream_cmd(ep->id);
> - if (ep->fd >= 0) {
> - close(ep->fd);
> - ep->fd = -1;
> - }
> -
> - ep->codec->cleanup(ep->codec_data);
> - ep->codec_data = NULL;
> + close_endpoint(a2dp_dev->out->ep);
>
> free(out->downmix_buf);
--
BR
Szymon Janc