 5 |  5 | #include <vector>
 6 |  6 | #include "talk/owt/sdk/base/eventtrigger.h"
 7 |  7 | #include "talk/owt/sdk/base/functionalobserver.h"
 8 |    | -#include "talk/owt/sdk/base/sdputils.h"
 9 |  8 | #include "talk/owt/sdk/base/sysinfo.h"
10 |  9 | #include "talk/owt/sdk/p2p/p2ppeerconnectionchannel.h"
11 | 10 | #include "webrtc/rtc_base/logging.h"
14 | 13 | using namespace rtc;
15 | 14 | namespace owt {
16 | 15 | namespace p2p {
   | 16 | +
   | 17 | +static const std::
   | 18 | +    unordered_map<owt::base::AudioCodec, const std::string, EnumClassHash>
   | 19 | +        audio_codec_names = {{owt::base::AudioCodec::kOpus, "OPUS"},
   | 20 | +                             {owt::base::AudioCodec::kIsac, "ISAC"},
   | 21 | +                             {owt::base::AudioCodec::kG722, "G722"},
   | 22 | +                             {owt::base::AudioCodec::kPcmu, "PCMU"},
   | 23 | +                             {owt::base::AudioCodec::kIlbc, "ILBC"},
   | 24 | +                             {owt::base::AudioCodec::kPcma, "PCMA"}};
   | 25 | +static const std::
   | 26 | +    unordered_map<owt::base::VideoCodec, const std::string, EnumClassHash>
   | 27 | +        video_codec_names = {{owt::base::VideoCodec::kVp8, "VP8"},
   | 28 | +                             {owt::base::VideoCodec::kVp9, "VP9"},
   | 29 | +                             {owt::base::VideoCodec::kH264, "H264"},
   | 30 | +                             {owt::base::VideoCodec::kH265, "H265"},
   | 31 | +                             {owt::base::VideoCodec::kAv1, "AV1"}};
   | 32 | +
17 | 33 | using std::string;
18 | 34 | enum P2PPeerConnectionChannel::SessionState : int {
19 | 35 |   kSessionStateReady = 1,  // Indicate the channel is ready. This is the initial state.
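The two tables above mirror the codec-name mappings that sdputils.h used to provide; with that include gone, the channel keeps local copies keyed by OWT's codec enums. Below is a minimal, illustrative sketch of how such a table is declared and queried; the EnumClassHash stand-in and the trimmed AudioCodec enum are assumptions for the example only (OWT's real definitions live in its base headers).

// Illustrative stand-in: casts the enum value to size_t so an enum class can
// key an unordered_map (older toolchains lack std::hash for enum classes).
#include <cstddef>
#include <string>
#include <unordered_map>

struct EnumClassHash {
  template <typename T>
  std::size_t operator()(T value) const {
    return static_cast<std::size_t>(value);
  }
};

enum class AudioCodec { kOpus, kPcmu };

static const std::unordered_map<AudioCodec, const std::string, EnumClassHash>
    audio_codec_names = {{AudioCodec::kOpus, "OPUS"},
                         {AudioCodec::kPcmu, "PCMU"}};

// .at() throws std::out_of_range for enumerators missing from the table,
// so every codec the configuration may carry needs an entry:
// const std::string& name = audio_codec_names.at(AudioCodec::kOpus);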
@@ -636,16 +652,6 @@ void P2PPeerConnectionChannel::OnSignalingChange(
636 | 652 |     RTC_LOG(LS_ERROR) << "Error parsing local description.";
637 | 653 |     RTC_DCHECK(false);
638 | 654 |   }
639 |     | -  std::vector<AudioCodec> audio_codecs;
640 |     | -  for (auto& audio_enc_param : configuration_.audio) {
641 |     | -    audio_codecs.push_back(audio_enc_param.codec.name);
642 |     | -  }
643 |     | -  sdp_string = SdpUtils::SetPreferAudioCodecs(sdp_string, audio_codecs);
644 |     | -  std::vector<VideoCodec> video_codecs;
645 |     | -  for (auto& video_enc_param : configuration_.video) {
646 |     | -    video_codecs.push_back(video_enc_param.codec.name);
647 |     | -  }
648 |     | -  sdp_string = SdpUtils::SetPreferVideoCodecs(sdp_string, video_codecs);
649 | 655 |   std::unique_ptr<webrtc::SessionDescriptionInterface> new_desc(
650 | 656 |       webrtc::CreateSessionDescription(pending_remote_sdp_->type(),
651 | 657 |                                        sdp_string, nullptr));
@@ -827,16 +833,6 @@ void P2PPeerConnectionChannel::OnCreateSessionDescriptionSuccess(
827 | 833 |     RTC_LOG(LS_ERROR) << "Error parsing local description.";
828 | 834 |     RTC_DCHECK(false);
829 | 835 |   }
830 |     | -  std::vector<AudioCodec> audio_codecs;
831 |     | -  for (auto& audio_enc_param : configuration_.audio) {
832 |     | -    audio_codecs.push_back(audio_enc_param.codec.name);
833 |     | -  }
834 |     | -  sdp_string = SdpUtils::SetPreferAudioCodecs(sdp_string, audio_codecs);
835 |     | -  std::vector<VideoCodec> video_codecs;
836 |     | -  for (auto& video_enc_param : configuration_.video) {
837 |     | -    video_codecs.push_back(video_enc_param.codec.name);
838 |     | -  }
839 |     | -  sdp_string = SdpUtils::SetPreferVideoCodecs(sdp_string, video_codecs);
840 | 836 |   webrtc::SessionDescriptionInterface* new_desc(
841 | 837 |       webrtc::CreateSessionDescription(desc->type(), sdp_string, nullptr));
842 | 838 |   peer_connection_->SetLocalDescription(observer.get(), new_desc);
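Both hunks drop the same ten lines: the remote and local descriptions are no longer post-processed with SdpUtils::SetPreferAudioCodecs / SetPreferVideoCodecs. Codec preference is instead expressed per transceiver in the DrainPendingStreams() hunk below, so the SDP string passed to CreateSessionDescription stays untouched. The following is a minimal sketch of that transceiver-based pattern against the stock WebRTC API; |factory|, |transceiver|, and the hard-coded "opus" preference are hypothetical names for the example, not part of this patch.

// Sketch only: |factory| is assumed to be a webrtc::PeerConnectionFactoryInterface*
// and |transceiver| the transceiver returned by AddTransceiver().
webrtc::RtpCapabilities capabilities =
    factory->GetRtpSenderCapabilities(cricket::MEDIA_TYPE_AUDIO);
std::vector<webrtc::RtpCodecCapability> preferred;
for (const webrtc::RtpCodecCapability& codec : capabilities.codecs) {
  // Capability names follow SDP casing ("opus", "PCMU", "VP8", ...); keep only
  // the codecs the application asked for.
  if (codec.name == "opus")
    preferred.push_back(codec);
}
webrtc::RTCError error = transceiver->SetCodecPreferences(preferred);
if (!error.ok()) {
  RTC_LOG(LS_ERROR) << "SetCodecPreferences failed: " << error.message();
}

SetCodecPreferences() must run before the offer or answer is generated so the preference shows up in the m-line, which supersedes the manual SDP editing removed above.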
@@ -1173,16 +1169,123 @@ void P2PPeerConnectionChannel::DrainPendingStreams() {
1173 | 1169 |       track_info[kTrackIdKey] = track->id();
1174 | 1170 |       track_info[kTrackSourceKey] = audio_track_source;
1175 | 1171 |       track_sources.append(track_info);
1176 |      | -      peer_connection_->AddTrack(track, {media_stream->id()});
     | 1172 | +
     | 1173 | +      webrtc::RtpTransceiverInit init;
     | 1174 | +      init.direction = webrtc::RtpTransceiverDirection::kSendRecv;
     | 1175 | +      init.stream_ids.push_back(media_stream->id());
     | 1176 | +      if (!configuration_.audio.empty()) {
     | 1177 | +        // OWT APIs allow different bitrate settings for different codecs.
     | 1178 | +        // However, this is not supported by WebRTC, so we take the first
     | 1179 | +        // codec's setting here. Consider changing the OWT API in the future.
     | 1180 | +        const auto& audio_encoding_parameters =
     | 1181 | +            configuration_.audio[0].rtp_encoding_parameters;
     | 1182 | +        std::vector<webrtc::RtpEncodingParameters>
     | 1183 | +            rtp_encoding_parameters_list;
     | 1184 | +        rtp_encoding_parameters_list.resize(audio_encoding_parameters.size());
     | 1185 | +        std::transform(
     | 1186 | +            audio_encoding_parameters.begin(), audio_encoding_parameters.end(),
     | 1187 | +            rtp_encoding_parameters_list.begin(),
     | 1188 | +            [](const RtpEncodingParameters& p) {
     | 1189 | +              webrtc::RtpEncodingParameters encoding_parameters;
     | 1190 | +              encoding_parameters.active = p.active;
     | 1191 | +              if (p.max_bitrate_bps != 0) {
     | 1192 | +                encoding_parameters.max_bitrate_bps = p.max_bitrate_bps;
     | 1193 | +              }
     | 1194 | +              if (p.max_framerate != 0) {
     | 1195 | +                encoding_parameters.max_framerate = p.max_framerate;
     | 1196 | +              }
     | 1197 | +              if (!p.rid.empty()) {
     | 1198 | +                encoding_parameters.rid = p.rid;
     | 1199 | +              }
     | 1200 | +              return encoding_parameters;
     | 1201 | +            });
     | 1202 | +        init.send_encodings = rtp_encoding_parameters_list;
     | 1203 | +      }
     | 1204 | +      auto transceiver = AddTransceiver(track, init);
     | 1205 | +      if (!configuration_.audio.empty() && transceiver.ok()) {
     | 1206 | +        std::vector<webrtc::RtpCodecCapability> codecs;
     | 1207 | +        auto capabilities =
     | 1208 | +            PeerConnectionDependencyFactory::Get()->GetSenderCapabilities(
     | 1209 | +                "audio");
     | 1210 | +        for (const auto& audio : configuration_.audio) {
     | 1211 | +          for (auto& c : capabilities->codecs) {
     | 1212 | +            if (c.name != audio_codec_names.at(audio.codec.name)) {
     | 1213 | +              continue;
     | 1214 | +            }
     | 1215 | +            if (audio.codec.channel_count != 0 &&
     | 1216 | +                c.num_channels !=
     | 1217 | +                    static_cast<int>(audio.codec.channel_count)) {
     | 1218 | +              continue;
     | 1219 | +            }
     | 1220 | +            codecs.push_back(c);
     | 1221 | +          }
     | 1222 | +        }
     | 1223 | +        transceiver.value()->SetCodecPreferences(codecs);
     | 1224 | +      }
1177 | 1225 |     }
|
1178 | 1226 | for (const auto& track : media_stream->GetVideoTracks()) {
|
| 1227 | + RTC_LOG(LS_INFO)<<"GetVideoTracks(), config empty: "<<configuration_.video.empty(); |
1179 | 1228 | // Signaling.
|
1180 | 1229 | stream_tracks.append(track->id());
|
1181 | 1230 | stream_sources[kStreamVideoSourceKey] = video_track_source;
|
1182 | 1231 | track_info[kTrackIdKey] = track->id();
|
1183 | 1232 | track_info[kTrackSourceKey] = video_track_source;
|
1184 | 1233 | track_sources.append(track_info);
|
1185 |
| - peer_connection_->AddTrack(track, {media_stream->id()}); |
| 1234 | + |
| 1235 | + webrtc::RtpTransceiverInit init; |
| 1236 | + init.direction = webrtc::RtpTransceiverDirection::kSendRecv; |
| 1237 | + init.stream_ids.push_back(media_stream->id()); |
| 1238 | + if (!configuration_.video.empty()) { |
| 1239 | + // OWT APIs allow different bitrate settings for different codecs. |
| 1240 | + // However, this is not supported by WebRTC. We take the first |
| 1241 | + // codec's setting here. Consider to change OWT API in the future. |
| 1242 | + const auto& video_encoding_paramters = |
| 1243 | + configuration_.video[0].rtp_encoding_parameters; |
| 1244 | + std::vector<webrtc::RtpEncodingParameters> |
| 1245 | + rtp_encoding_parameters_list; |
| 1246 | + rtp_encoding_parameters_list.resize(video_encoding_paramters.size()); |
| 1247 | + std::transform( |
| 1248 | + video_encoding_paramters.begin(), video_encoding_paramters.end(), |
| 1249 | + rtp_encoding_parameters_list.begin(), |
| 1250 | + [](const RtpEncodingParameters& p) { |
| 1251 | + webrtc::RtpEncodingParameters encoding_paramters; |
| 1252 | + encoding_paramters.active = p.active; |
| 1253 | + if (p.max_bitrate_bps != 0) { |
| 1254 | + encoding_paramters.max_bitrate_bps = p.max_bitrate_bps; |
| 1255 | + } |
| 1256 | + if (p.max_framerate != 0) { |
| 1257 | + encoding_paramters.max_framerate = p.max_framerate; |
| 1258 | + } |
| 1259 | + if (p.rid != "") { |
| 1260 | + encoding_paramters.rid = p.rid; |
| 1261 | + } |
| 1262 | + if (p.num_temporal_layers != 0) { |
| 1263 | + encoding_paramters.num_temporal_layers = |
| 1264 | + p.num_temporal_layers; |
| 1265 | + } |
| 1266 | + if (p.scale_resolution_down_by != 0) { |
| 1267 | + encoding_paramters.scale_resolution_down_by = |
| 1268 | + p.scale_resolution_down_by; |
| 1269 | + } |
| 1270 | + return encoding_paramters; |
| 1271 | + }); |
| 1272 | + init.send_encodings = rtp_encoding_parameters_list; |
| 1273 | + } |
| 1274 | + auto transceiver = AddTransceiver(track, init); |
| 1275 | + if (!configuration_.video.empty() && transceiver.ok()) { |
| 1276 | + std::vector<webrtc::RtpCodecCapability> codecs; |
| 1277 | + auto capabilities = |
| 1278 | + PeerConnectionDependencyFactory::Get()->GetSenderCapabilities( |
| 1279 | + "video"); |
| 1280 | + for (const auto& video : configuration_.video) { |
| 1281 | + for (auto& c : capabilities->codecs) { |
| 1282 | + if (c.name == video_codec_names.at(video.codec.name)) { |
| 1283 | + codecs.push_back(c); |
| 1284 | + } |
| 1285 | + } |
| 1286 | + } |
| 1287 | + transceiver.value()->SetCodecPreferences(codecs); |
| 1288 | + } |
1186 | 1289 | }
|
1187 | 1290 | // The second signaling message of track sources to remote peer.
|
1188 | 1291 | Json::Value json_track_sources;
|
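In the new DrainPendingStreams() path, each configured RtpEncodingParameters entry is copied into a webrtc::RtpEncodingParameters (fields left at 0 or empty keep their WebRTC defaults) and handed to AddTransceiver() through init.send_encodings, which is how Unified Plan declares simulcast layers; the codec filter then narrows the transceiver to the configured codecs. Below is a minimal sketch of the send_encodings side in plain WebRTC terms; |pc|, |track|, and the rid and bitrate values are hypothetical, chosen only for the example.

// Sketch only: |pc| is assumed to be a webrtc::PeerConnectionInterface* and
// |track| a video MediaStreamTrackInterface.
webrtc::RtpTransceiverInit init;
init.direction = webrtc::RtpTransceiverDirection::kSendRecv;
init.stream_ids.push_back("stream-id");

webrtc::RtpEncodingParameters low;
low.rid = "q";                       // quarter-resolution simulcast layer
low.scale_resolution_down_by = 4.0;
low.max_bitrate_bps = 300000;

webrtc::RtpEncodingParameters high;
high.rid = "f";                      // full-resolution layer
high.max_bitrate_bps = 2500000;

init.send_encodings = {low, high};   // one entry per simulcast layer

auto result = pc->AddTransceiver(track, init);
if (result.ok()) {
  // The same encodings can still be tuned later through the sender.
  webrtc::RtpParameters params = result.value()->sender()->GetParameters();
  params.encodings[0].active = true;
  result.value()->sender()->SetParameters(params);
}

The per-layer rid values end up in the offer as simulcast entries, so declaring them at AddTransceiver() time avoids rewriting the SDP afterwards.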