Rename EncodedImage::_length --> size_, and make private.

Use size() accessor function. Also replace most nearby uses of _buffer
with data().

Bug: webrtc:9378
Change-Id: I1ac3459612f7c6151bd057d05448da1c4e1c6e3d
Reviewed-on: https://webrtc-review.googlesource.com/c/116783
Commit-Queue: Niels Möller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26273}
This commit is contained in:
@@ -161,7 +161,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   if (decode_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  if (input_image._buffer == NULL && input_image._length > 0) {
+  if (input_image.data() == NULL && input_image.size() > 0) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0)
       propagation_cnt_ = 0;
@@ -249,10 +249,10 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   }

   uint8_t* buffer = input_image._buffer;
-  if (input_image._length == 0) {
+  if (input_image.size() == 0) {
     buffer = NULL;  // Triggers full frame concealment.
   }
-  if (vpx_codec_decode(decoder_, buffer, input_image._length, 0,
+  if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,
                        kDecodeDeadlineRealtime)) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0) {
@@ -861,7 +861,7 @@ void LibvpxVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
   int qp = 0;
   vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
   temporal_layers_[stream_idx]->OnEncodeDone(
-      timestamp, encoded_images_[encoder_idx]._length,
+      timestamp, encoded_images_[encoder_idx].size(),
       (pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0, qp, vp8Info);
 }

@@ -871,7 +871,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
   for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
-    encoded_images_[encoder_idx]._length = 0;
+    encoded_images_[encoder_idx].set_size(0);
     encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
     CodecSpecificInfo codec_specific;
     const vpx_codec_cx_pkt_t* pkt = NULL;
@@ -879,7 +879,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
            NULL) {
       switch (pkt->kind) {
         case VPX_CODEC_CX_FRAME_PKT: {
-          size_t length = encoded_images_[encoder_idx]._length;
+          size_t length = encoded_images_[encoder_idx].size();
           if (pkt->data.frame.sz + length >
               encoded_images_[encoder_idx].capacity()) {
             uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
@@ -890,8 +890,8 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
           }
           memcpy(&encoded_images_[encoder_idx]._buffer[length],
                  pkt->data.frame.buf, pkt->data.frame.sz);
-          encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
-          assert(length <= encoded_images_[encoder_idx].capacity());
+          encoded_images_[encoder_idx].set_size(
+              encoded_images_[encoder_idx].size() + pkt->data.frame.sz);
           break;
         }
         default:
@@ -921,9 +921,9 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
     encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());

     if (send_stream_[stream_idx]) {
-      if (encoded_images_[encoder_idx]._length > 0) {
+      if (encoded_images_[encoder_idx].size() > 0) {
         TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
-                          encoded_images_[encoder_idx]._length);
+                          encoded_images_[encoder_idx].size());
         encoded_images_[encoder_idx]._encodedHeight =
             codec_.simulcastStream[stream_idx].height;
         encoded_images_[encoder_idx]._encodedWidth =
@@ -937,7 +937,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
     } else if (!temporal_layers_[stream_idx]
                     ->SupportsEncoderFrameDropping()) {
       result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
-      if (encoded_images_[encoder_idx]._length == 0) {
+      if (encoded_images_[encoder_idx].size() == 0) {
         // Dropped frame that will be re-encoded.
         temporal_layers_[stream_idx]->OnEncodeDone(input_image.timestamp(), 0,
                                                    false, 0, nullptr);
@@ -98,8 +98,8 @@ class TestVp8Impl : public VideoCodecUnitTest {

  void VerifyQpParser(const EncodedImage& encoded_frame) const {
    int qp;
-    EXPECT_GT(encoded_frame._length, 0u);
-    ASSERT_TRUE(vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+    EXPECT_GT(encoded_frame.size(), 0u);
+    ASSERT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
    EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
  }
};
Reference in New Issue
Block a user