From 8f2e92423b83d898e1f01385f40cd0d23f2bf0b4 Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Wed, 17 Sep 2025 10:11:35 +0300 Subject: [PATCH 01/15] Implement fastlane ios --- .gitignore | 2 + fastlane/Fastfile | 7 + fastlane/Gemfile | 13 ++ fastlane/Gemfile.lock | 308 ++++++++++++++++++++++++++++++++++ fastlane/Matchfile | 5 + fastlane/Pluginfile | 5 + fastlane/build_xcframework.sh | 24 +++ fastlane/lanes/gclient | 64 +++++++ fastlane/lanes/ios | 305 +++++++++++++++++++++++++++++++++ fastlane/lanes/utilities | 69 ++++++++ 10 files changed, 802 insertions(+) create mode 100644 fastlane/Fastfile create mode 100644 fastlane/Gemfile create mode 100644 fastlane/Gemfile.lock create mode 100644 fastlane/Matchfile create mode 100644 fastlane/Pluginfile create mode 100755 fastlane/build_xcframework.sh create mode 100644 fastlane/lanes/gclient create mode 100644 fastlane/lanes/ios create mode 100644 fastlane/lanes/utilities diff --git a/.gitignore b/.gitignore index 3ff72b28b1..ab926e592f 100644 --- a/.gitignore +++ b/.gitignore @@ -79,3 +79,5 @@ /node_modules /libwebrtc /args.txt +fastlane/vendor +out_ios_libs diff --git a/fastlane/Fastfile b/fastlane/Fastfile new file mode 100644 index 0000000000..52b91dd388 --- /dev/null +++ b/fastlane/Fastfile @@ -0,0 +1,7 @@ +require 'json' +require 'net/http' +require 'fileutils' + +import "./lanes/utilities" +import "./lanes/gclient" +import "./lanes/ios" \ No newline at end of file diff --git a/fastlane/Gemfile b/fastlane/Gemfile new file mode 100644 index 0000000000..86eea32460 --- /dev/null +++ b/fastlane/Gemfile @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +git_source(:github) { |repo_name| "https://github.com/#{repo_name}" } + +gem 'cocoapods' +gem 'fastlane' +gem 'json' +gem 'plist' + +plugins_path = File.join(File.dirname(__FILE__), 'Pluginfile') +eval_gemfile(plugins_path) if File.exist?(plugins_path) \ No newline at end of file diff --git a/fastlane/Gemfile.lock b/fastlane/Gemfile.lock new file mode 100644 index 0000000000..a01c301f46 --- /dev/null +++ b/fastlane/Gemfile.lock @@ -0,0 +1,308 @@ +GEM + remote: https://rubygems.org/ + specs: + CFPropertyList (3.0.7) + base64 + nkf + rexml + activesupport (7.2.2.1) + base64 + benchmark (>= 0.3) + bigdecimal + concurrent-ruby (~> 1.0, >= 1.3.1) + connection_pool (>= 2.2.5) + drb + i18n (>= 1.6, < 2) + logger (>= 1.4.2) + minitest (>= 5.1) + securerandom (>= 0.3) + tzinfo (~> 2.0, >= 2.0.5) + addressable (2.8.7) + public_suffix (>= 2.0.2, < 7.0) + algoliasearch (1.27.5) + httpclient (~> 2.8, >= 2.8.3) + json (>= 1.5.1) + artifactory (3.0.17) + atomos (0.1.3) + aws-eventstream (1.4.0) + aws-partitions (1.1131.0) + aws-sdk-core (3.226.3) + aws-eventstream (~> 1, >= 1.3.0) + aws-partitions (~> 1, >= 1.992.0) + aws-sigv4 (~> 1.9) + base64 + jmespath (~> 1, >= 1.6.1) + logger + aws-sdk-kms (1.106.0) + aws-sdk-core (~> 3, >= 3.225.0) + aws-sigv4 (~> 1.5) + aws-sdk-s3 (1.193.0) + aws-sdk-core (~> 3, >= 3.225.0) + aws-sdk-kms (~> 1) + aws-sigv4 (~> 1.5) + aws-sigv4 (1.12.1) + aws-eventstream (~> 1, >= 1.0.2) + babosa (1.0.4) + base64 (0.3.0) + benchmark (0.4.1) + bigdecimal (3.2.2) + claide (1.1.0) + cocoapods (1.16.2) + addressable (~> 2.8) + claide (>= 1.0.2, < 2.0) + cocoapods-core (= 1.16.2) + cocoapods-deintegrate (>= 1.0.3, < 2.0) + cocoapods-downloader (>= 2.1, < 3.0) + cocoapods-plugins (>= 1.0.0, < 2.0) + cocoapods-search (>= 1.0.0, < 2.0) + cocoapods-trunk (>= 1.6.0, < 2.0) + cocoapods-try (>= 1.1.0, < 2.0) + colored2 (~> 3.1) + escape (~> 0.0.4) + fourflusher 
(>= 2.3.0, < 3.0) + gh_inspector (~> 1.0) + molinillo (~> 0.8.0) + nap (~> 1.0) + ruby-macho (>= 2.3.0, < 3.0) + xcodeproj (>= 1.27.0, < 2.0) + cocoapods-core (1.16.2) + activesupport (>= 5.0, < 8) + addressable (~> 2.8) + algoliasearch (~> 1.0) + concurrent-ruby (~> 1.1) + fuzzy_match (~> 2.0.4) + nap (~> 1.0) + netrc (~> 0.11) + public_suffix (~> 4.0) + typhoeus (~> 1.0) + cocoapods-deintegrate (1.0.5) + cocoapods-downloader (2.1) + cocoapods-plugins (1.0.0) + nap + cocoapods-search (1.0.1) + cocoapods-trunk (1.6.0) + nap (>= 0.8, < 2.0) + netrc (~> 0.11) + cocoapods-try (1.2.0) + colored (1.2) + colored2 (3.1.2) + commander (4.6.0) + highline (~> 2.0.0) + concurrent-ruby (1.3.5) + connection_pool (2.5.3) + declarative (0.0.20) + digest-crc (0.7.0) + rake (>= 12.0.0, < 14.0.0) + domain_name (0.6.20240107) + dotenv (2.8.1) + drb (2.2.3) + emoji_regex (3.2.3) + escape (0.0.4) + ethon (0.16.0) + ffi (>= 1.15.0) + excon (0.112.0) + faraday (1.10.4) + faraday-em_http (~> 1.0) + faraday-em_synchrony (~> 1.0) + faraday-excon (~> 1.1) + faraday-httpclient (~> 1.0) + faraday-multipart (~> 1.0) + faraday-net_http (~> 1.0) + faraday-net_http_persistent (~> 1.0) + faraday-patron (~> 1.0) + faraday-rack (~> 1.0) + faraday-retry (~> 1.0) + ruby2_keywords (>= 0.0.4) + faraday-cookie_jar (0.0.7) + faraday (>= 0.8.0) + http-cookie (~> 1.0.0) + faraday-em_http (1.0.0) + faraday-em_synchrony (1.0.1) + faraday-excon (1.1.0) + faraday-httpclient (1.0.1) + faraday-multipart (1.1.1) + multipart-post (~> 2.0) + faraday-net_http (1.0.2) + faraday-net_http_persistent (1.2.0) + faraday-patron (1.0.0) + faraday-rack (1.0.0) + faraday-retry (1.0.3) + faraday_middleware (1.2.1) + faraday (~> 1.0) + fastimage (2.4.0) + fastlane (2.228.0) + CFPropertyList (>= 2.3, < 4.0.0) + addressable (>= 2.8, < 3.0.0) + artifactory (~> 3.0) + aws-sdk-s3 (~> 1.0) + babosa (>= 1.0.3, < 2.0.0) + bundler (>= 1.12.0, < 3.0.0) + colored (~> 1.2) + commander (~> 4.6) + dotenv (>= 2.1.1, < 3.0.0) + emoji_regex (>= 0.1, < 4.0) + excon (>= 0.71.0, < 1.0.0) + faraday (~> 1.0) + faraday-cookie_jar (~> 0.0.6) + faraday_middleware (~> 1.0) + fastimage (>= 2.1.0, < 3.0.0) + fastlane-sirp (>= 1.0.0) + gh_inspector (>= 1.1.2, < 2.0.0) + google-apis-androidpublisher_v3 (~> 0.3) + google-apis-playcustomapp_v1 (~> 0.1) + google-cloud-env (>= 1.6.0, < 2.0.0) + google-cloud-storage (~> 1.31) + highline (~> 2.0) + http-cookie (~> 1.0.5) + json (< 3.0.0) + jwt (>= 2.1.0, < 3) + mini_magick (>= 4.9.4, < 5.0.0) + multipart-post (>= 2.0.0, < 3.0.0) + naturally (~> 2.2) + optparse (>= 0.1.1, < 1.0.0) + plist (>= 3.1.0, < 4.0.0) + rubyzip (>= 2.0.0, < 3.0.0) + security (= 0.1.5) + simctl (~> 1.6.3) + terminal-notifier (>= 2.0.0, < 3.0.0) + terminal-table (~> 3) + tty-screen (>= 0.6.3, < 1.0.0) + tty-spinner (>= 0.8.0, < 1.0.0) + word_wrap (~> 1.0.0) + xcodeproj (>= 1.13.0, < 2.0.0) + xcpretty (~> 0.4.1) + xcpretty-travis-formatter (>= 0.0.3, < 2.0.0) + fastlane-plugin-stream_actions (0.3.90) + xctest_list (= 1.2.1) + fastlane-sirp (1.0.0) + sysrandom (~> 1.0) + ffi (1.17.2-arm64-darwin) + fourflusher (2.3.1) + fuzzy_match (2.0.4) + gh_inspector (1.1.3) + google-apis-androidpublisher_v3 (0.54.0) + google-apis-core (>= 0.11.0, < 2.a) + google-apis-core (0.11.3) + addressable (~> 2.5, >= 2.5.1) + googleauth (>= 0.16.2, < 2.a) + httpclient (>= 2.8.1, < 3.a) + mini_mime (~> 1.0) + representable (~> 3.0) + retriable (>= 2.0, < 4.a) + rexml + google-apis-iamcredentials_v1 (0.17.0) + google-apis-core (>= 0.11.0, < 2.a) + google-apis-playcustomapp_v1 (0.13.0) + 
google-apis-core (>= 0.11.0, < 2.a) + google-apis-storage_v1 (0.31.0) + google-apis-core (>= 0.11.0, < 2.a) + google-cloud-core (1.8.0) + google-cloud-env (>= 1.0, < 3.a) + google-cloud-errors (~> 1.0) + google-cloud-env (1.6.0) + faraday (>= 0.17.3, < 3.0) + google-cloud-errors (1.5.0) + google-cloud-storage (1.47.0) + addressable (~> 2.8) + digest-crc (~> 0.4) + google-apis-iamcredentials_v1 (~> 0.1) + google-apis-storage_v1 (~> 0.31.0) + google-cloud-core (~> 1.6) + googleauth (>= 0.16.2, < 2.a) + mini_mime (~> 1.0) + googleauth (1.8.1) + faraday (>= 0.17.3, < 3.a) + jwt (>= 1.4, < 3.0) + multi_json (~> 1.11) + os (>= 0.9, < 2.0) + signet (>= 0.16, < 2.a) + highline (2.0.3) + http-cookie (1.0.8) + domain_name (~> 0.5) + httpclient (2.9.0) + mutex_m + i18n (1.14.7) + concurrent-ruby (~> 1.0) + jmespath (1.6.2) + json (2.13.0) + jwt (2.10.2) + base64 + logger (1.7.0) + mini_magick (4.13.2) + mini_mime (1.1.5) + minitest (5.25.5) + molinillo (0.8.0) + multi_json (1.17.0) + multipart-post (2.4.1) + mutex_m (0.3.0) + nanaimo (0.4.0) + nap (1.1.0) + naturally (2.3.0) + netrc (0.11.0) + nkf (0.2.0) + optparse (0.6.0) + os (1.1.4) + plist (3.7.2) + public_suffix (4.0.7) + rake (13.3.0) + representable (3.2.0) + declarative (< 0.1.0) + trailblazer-option (>= 0.1.1, < 0.2.0) + uber (< 0.2.0) + retriable (3.1.2) + rexml (3.4.1) + rouge (3.28.0) + ruby-macho (2.5.1) + ruby2_keywords (0.0.5) + rubyzip (2.4.1) + securerandom (0.4.1) + security (0.1.5) + signet (0.20.0) + addressable (~> 2.8) + faraday (>= 0.17.5, < 3.a) + jwt (>= 1.5, < 3.0) + multi_json (~> 1.10) + simctl (1.6.10) + CFPropertyList + naturally + sysrandom (1.0.5) + terminal-notifier (2.0.0) + terminal-table (3.0.2) + unicode-display_width (>= 1.1.1, < 3) + trailblazer-option (0.1.2) + tty-cursor (0.7.1) + tty-screen (0.8.2) + tty-spinner (0.9.3) + tty-cursor (~> 0.7) + typhoeus (1.4.1) + ethon (>= 0.9.0) + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + uber (0.1.0) + unicode-display_width (2.6.0) + word_wrap (1.0.0) + xcodeproj (1.27.0) + CFPropertyList (>= 2.3.3, < 4.0) + atomos (~> 0.1.3) + claide (>= 1.0.2, < 2.0) + colored2 (~> 3.1) + nanaimo (~> 0.4.0) + rexml (>= 3.3.6, < 4.0) + xcpretty (0.4.1) + rouge (~> 3.28.0) + xcpretty-travis-formatter (1.0.1) + xcpretty (~> 0.2, >= 0.0.7) + xctest_list (1.2.1) + +PLATFORMS + arm64-darwin-24 + +DEPENDENCIES + cocoapods + fastlane + fastlane-plugin-stream_actions (= 0.3.90) + json + plist + +BUNDLED WITH + 2.3.3 diff --git a/fastlane/Matchfile b/fastlane/Matchfile new file mode 100644 index 0000000000..f26ef457be --- /dev/null +++ b/fastlane/Matchfile @@ -0,0 +1,5 @@ +git_url("git@github.com:GetStream/ios-certificates.git") + +storage_mode("git") + +team_id("EHV7XZLAHA") diff --git a/fastlane/Pluginfile b/fastlane/Pluginfile new file mode 100644 index 0000000000..76c87679b1 --- /dev/null +++ b/fastlane/Pluginfile @@ -0,0 +1,5 @@ +# Autogenerated by fastlane +# +# Ensure this file is checked in to source control! 
+ +gem 'fastlane-plugin-stream_actions', '0.3.90' \ No newline at end of file diff --git a/fastlane/build_xcframework.sh b/fastlane/build_xcframework.sh new file mode 100755 index 0000000000..a26887ea72 --- /dev/null +++ b/fastlane/build_xcframework.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +gclient root +gclient config --spec 'solutions = [ +{ + "name": "src", + "url": "git@github.com:GetStream/webrtc.git", + "deps_file": "DEPS", + "managed": False, + "custom_deps": {}, +}, +] +target_os = ["ios", "mac"] +' +gclient sync -j8 -v + +cd src +./tools_webrtc/ios/build_ios_libs.py \ + --deployment-target 13.0 \ + --extra-gn-args \ + is_debug=false \ + use_goma=false \ + use_rtti=false \ + rtc_libvpx_build_vp9=true \ No newline at end of file diff --git a/fastlane/lanes/gclient b/fastlane/lanes/gclient new file mode 100644 index 0000000000..f6d300fc99 --- /dev/null +++ b/fastlane/lanes/gclient @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +require 'fileutils' + +private_lane :configure_gclient do |options| + log_debug( + message: "Configuring gclient...", + verbose: options[:verbose] + ) + + base_root = File.expand_path('../../../', __dir__) + root = if options[:output] + File.expand_path(options[:output], base_root) + else + File.join(base_root, '.output') + end + FileUtils.mkdir_p(root) + + Dir.chdir(root) do + # Set gclient root + execute_command(command: "gclient root", verbose: options[:verbose]) + + # Configure gclient with the spec + gclient_spec = <<~SPEC + solutions = [ + { + "name": "src", + "url": "git@github.com:GetStream/webrtc.git", + "deps_file": "DEPS", + "managed": False, + "custom_deps": {}, + }, + ] + target_os = ["#{options[:target_os]}"] + SPEC + + # Write spec to temporary file and configure + execute_command( + command: "gclient config --spec '#{gclient_spec.gsub("'", "'\"'\"'")}'", + verbose: options[:verbose] + ) + + UI.success("gclient configured successfully") + + sync_dependencies( + number_of_jobs: options[:number_of_jobs], + verbose: options[:verbose] + ) + end +end + +private_lane :sync_dependencies do |options| + jobs = options[:number_of_jobs] || 8 + log_debug( + message: "Syncing dependencies...", + verbose: options[:verbose] + ) + + command = "gclient sync -j#{jobs}" + command += " -v" if options[:verbose] + + execute_command(command: command, verbose: options[:verbose]) + UI.success("Dependencies synced successfully") +end diff --git a/fastlane/lanes/ios b/fastlane/lanes/ios new file mode 100644 index 0000000000..731873774f --- /dev/null +++ b/fastlane/lanes/ios @@ -0,0 +1,305 @@ +# frozen_string_literal: true + +require 'fileutils' +require 'pathname' +fastlane_require "fastlane-plugin-stream_actions" + +platform :ios do + desc "Sync dependencies and build the WebRTC iOS libraries" + + lane :build do |options| + options[:root] = Pathname.new(__dir__).join('..', '..', '..').expand_path + options[:build_root] = File.join(options[:root], ".output") + options[:products_root] = File.join(options[:root], ".products") + options[:sdk_name] = "WebRTC" + options[:product_source] = File.join(options[:root], "src/out_ios_libs/#{options[:sdk_name]}.xcframework") + options[:build_tool] = File.join(options[:root], "src/tools_webrtc/ios/build_ios_libs.py") + options[:rename_to_sdk_name] = "Stream#{options[:sdk_name]}" + options[:product] = File.join(options[:products_root], "#{options[:rename_to_sdk_name]}.xcframework") + options[:match_file] = File.join(options[:root], "src/fastlane/Matchfile") + + log_info(message: "Root: #{options[:root]}") + log_info(message: "Build 
root: #{options[:build_root]}") + log_info(message: "Products root: #{options[:products_root]}") + log_info(message: "Product source: #{options[:product_source]}") + log_info(message: "Build tool: #{options[:build_tool]}") + log_info(message: "Matchfile: #{options[:match_file]}") + log_info(message: "Product: #{options[:product]}") + + setup_ci if is_ci + clean_up_products(options) + verify_environment(options) + configure_google_client(options) + build_product(options) + move_product(options) + rename_product(options) + prepare_signing(options) + sign_product(options) + zip_product(options) + end + + lane :clean_up_products do |options| + lane_options = extract_prefixed_options(options, "clean_up_products") + next unless lane_options[:skip] != true + log_debug(message: "Cleaning up products", verbose: options[:verbose]) + + products_root = options[:products_root] + assert(message: "Missing required option :products_root") if products_root.to_s.strip.empty? + + unless Dir.exist?(products_root) + return + end + + log_info(message: "Cleaning products directory at #{products_root}") + Dir.children(products_root).each do |entry| + FileUtils.rm_rf(File.join(products_root, entry)) + end + end + + lane :verify_environment do |options| + lane_options = extract_prefixed_options(options, "verify_environment") + next unless lane_options[:skip] != true + + verify_build_environment(verbose: options[:verbose]) + + ensure_required_tool(tool: "xcodebuild", verbose: options[:verbose]) + + # Check if we're on macOS + unless RUBY_PLATFORM.include?('darwin') + log_error(message: "iOS builds require macOS") + end + + # Check if Xcode is installed + unless system("xcode-select -p > /dev/null 2>&1") + log_error(message: "Xcode command line tools not found") + end + end + + lane :configure_google_client do |options| + lane_options = extract_prefixed_options(options, "configure_google_client_") + next if lane_options[:skip] == true + + configure_gclient( + target_os: "ios", + verbose: options[:verbose], + number_of_jobs: options[:number_of_jobs], + output: options[:build_root] + ) + end + + lane :build_product do |options| + lane_options = extract_prefixed_options(options, "build_product") + next if lane_options[:skip] == true + + deployment_target = options[:deployment_target] || "13.0" + + args_list = resolve_build_product_args( + args: extract_prefixed_options(lane_options, "arg"), + verbose: options[:verbose] + ) + + script_path = options[:build_tool] + Dir.chdir(options[:build_root]) do + command_parts = ["\"#{script_path}\""] + command_parts << "--deployment-target #{deployment_target}" + command_parts << "--extra-gn-args" + command_parts.concat(args_list) + + execute_command( + command: command_parts.join(' '), + verbose: options[:verbose] + ) + end + end + + lane :move_product do |options| + lane_options = extract_prefixed_options(options, "move_product_") + next if lane_options[:skip] == true + + product_source = options[:product_source] + assert(message: "Missing required option :product_source") if product_source.to_s.strip.empty? + assert(message: "Product not found at #{product_source}") unless File.exist?(product_source) + + product_destination = options[:products_root] + assert(message: "Missing required option :products_root") if product_destination.to_s.strip.empty? 
+ + FileUtils.mkdir_p(product_destination) + + destination_path = File.join(product_destination, File.basename(product_source)) + log_info(message: "Moving product from #{product_source} to #{destination_path}") + FileUtils.mv(product_source, destination_path) + + File.expand_path(destination_path) + end + + lane :rename_product do |options| + lane_options = extract_prefixed_options(options, "rename_product_") + next if lane_options[:skip] == true + + product_source = options[:product_source] + assert(message: "Missing required option :product_source") if product_source.to_s.strip.empty? + products_root = options[:products_root] + assert(message: "Missing required option :products_root") if products_root.to_s.strip.empty? + product_destination = File.join(products_root, File.basename(product_source)) + assert(message: "Missing required option :product_destination") if product_destination.to_s.strip.empty? + assert(message: "Product not found at #{product_destination}") unless File.exist?(product_destination) + sdk_name = options[:sdk_name] + assert(message: "Missing required option :sdk_name") if sdk_name.to_s.strip.empty? + modified_sdk_name = options[:rename_to_sdk_name] + assert(message: "Missing required option :rename_to_sdk_name") if modified_sdk_name.to_s.strip.empty? + + old_framework_path = product_destination + new_framework_path = File.join(products_root, "#{modified_sdk_name}.xcframework") + + # Rename the framework itself + sh("cp -R #{old_framework_path} #{new_framework_path}") + + # Rename all files with the old framework name with the new one + ["#{sdk_name}.framework", "#{sdk_name}.h", sdk_name].each do |file_name| + Dir.glob("#{new_framework_path}/**/*").each do |old_file_path| + next unless File.basename(old_file_path) == file_name + + new_file_path = old_file_path.reverse.sub(sdk_name.reverse, modified_sdk_name.reverse).reverse + File.rename(old_file_path, new_file_path) + end + end + + # Replace all occurrences of the old framework name with the new one in the plist and modulemap files + Dir.glob(["#{new_framework_path}/**/Info.plist", "#{new_framework_path}/**/module.modulemap"]).each do |file| + sh("plutil -convert xml1 #{file}") if file.include?('Info.plist') + old_text = File.read(file) + new_text = old_text.gsub(/#{sdk_name}/, modified_sdk_name) + File.open(file, 'w') { |f| f.puts(new_text) } if old_text != new_text + end + + # Replace all imports of the old framework with the new one + Dir.glob("#{new_framework_path}/**/*.h").each do |file| + old_text = File.read(file) + new_text = old_text.gsub(/import <#{sdk_name}/, "import <#{modified_sdk_name}") + File.open(file, 'w') { |f| f.puts(new_text) } if old_text != new_text + end + + # Rename the rpath for all the frameworks and update symlinks if required + framework_paths = new_framework_path.include?('.xcframework') ? 
Dir.glob("#{new_framework_path}/**/*.framework") : [new_framework_path] + framework_paths.each do |path| + Dir.chdir(path) do + if File.symlink?(modified_sdk_name) + old_symlink = File.readlink(modified_sdk_name) + new_symlink = old_symlink.reverse.sub(sdk_name.reverse, modified_sdk_name.reverse).reverse + + File.delete(modified_sdk_name) + File.symlink(new_symlink, modified_sdk_name) + end + + sh("install_name_tool -id @rpath/#{modified_sdk_name}.framework/#{modified_sdk_name} #{modified_sdk_name}") + end + end + new_framework_path + end + + lane :prepare_signing do |options| + lane_options = extract_prefixed_options(options, "prepare_signing") + next unless lane_options[:skip] != true + + custom_match( + api_key: appstore_api_key, + app_identifier: ['io.getstream.iOS.VideoDemoApp'], # dummy app to get the certificates + readonly: true + ) + end + + lane :sign_product do |options| + lane_options = extract_prefixed_options(options, "sign_product_") + next if lane_options[:skip] == true + + matchfile = options[:match_file] + assert(message: "Missing required option :match_file") if matchfile.to_s.strip.empty? + + team_id = File.read(matchfile).match(/team_id\("(.*)"\)/)[1] + execute_command( + command: "codesign --force --timestamp -v --sign 'Apple Distribution: Stream.io Inc (#{team_id})' #{options[:product]}", + verbose: options[:verbose] + ) + end + + lane :zip_product do |options| + lane_options = extract_prefixed_options(options, "zip_product_") + next if lane_options[:skip] == true + + file_path = options[:product] + zip_path = File.join(options[:products_root], "#{options[:rename_to_sdk_name]}.zip") + execute_command( + command: "ditto -c -k --sequesterRsrc --keepParent #{file_path} #{zip_path}", + verbose: options[:verbose] + ) + zip_path + end + + private_lane :resolve_build_product_args do |options| + args = options[:args] || {} + + provided_args = [] + + arg_options = args.each_with_object({}) do |(key, value), memo| + key_str = key.to_s + + next if value.nil? + + memo[key_str] = value + end + + arg_options.each do |key, value| + value_str = value.is_a?(TrueClass) || value.is_a?(FalseClass) ? value.to_s : value.to_s + provided_args << "#{key}=#{value_str}" + end + + default_args = { + "is_debug" => "false", + "use_goma" => "false", + "use_rtti" => "false", + "rtc_libvpx_build_vp9" => "true" + } + + args_map = default_args.dup + additional_args = {} + + provided_args.each do |arg| + next if arg.to_s.strip.empty? + + key, value = arg.split('=', 2) + next if key.nil? || key.strip.empty? + + key = key.strip + value = value.nil? ? '' : value.strip + value = 'true' if value.empty? + + if args_map.key?(key) + args_map[key] = value + else + additional_args[key] = value + end + end + + args_list = default_args.keys.map do |key| + value = args_map[key] + value.nil? || value.empty? ? key : "#{key}=#{value}" + end + + additional_args.each do |key, value| + args_list << (value.nil? || value.empty? ? 
key : "#{key}=#{value}") + end + + log_info(message: "Resolved GN args: #{args_list.join(', ')}") + + args_list + end + + private_lane :appstore_api_key do + @appstore_api_key ||= app_store_connect_api_key( + key_id: 'MT3PRT8TB7', + issuer_id: '69a6de96-0738-47e3-e053-5b8c7c11a4d1', + key_content: ENV.fetch('APPSTORE_API_KEY', nil), + in_house: false + ) + end +end diff --git a/fastlane/lanes/utilities b/fastlane/lanes/utilities new file mode 100644 index 0000000000..3b6992ea5d --- /dev/null +++ b/fastlane/lanes/utilities @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +private_lane :verify_build_environment do |options| + log_debug( + message: "Verifying build environment...", + verbose: options[:verbose] + ) + + # Check if required tools are available + ensure_required_tool(tool: "gclient", verbose: options[:verbose]) + ensure_required_tool(tool: "python3", verbose: options[:verbose]) + + UI.success("Build environment verified successfully") +end + +private_lane :log_debug do |options| + UI.message(options[:message]) if options[:verbose] +end + +private_lane :log_info do |options| + UI.message(options[:message]) +end + +private_lane :log_success do |options| + UI.success(options[:message]) +end + +private_lane :log_error do |options| + UI.error(options[:message]) +end + +private_lane :assert do |options| + UI.abort_with_message!(options[:message]) +end + +private_lane :ensure_required_tool do |options| + tool = options[:tool] + unless system("which #{tool} > /dev/null 2>&1") + UI.user_error!("Required tool '#{tool}' not found in PATH") + end +end + +private_lane :execute_command do |options| + sh( + options[:command], + print_command: true, + print_command_output: options[:verbose] + ) +end + +def extract_prefixed_options(options, prefix) + return {} if options.nil? + + prefix = prefix.to_s + return {} if prefix.empty? + + prefix = prefix.end_with?('_') ? prefix : "#{prefix}_" + + options.each_with_object({}) do |(key, value), extracted| + next if value.nil? 
+ + key_str = key.to_s + next unless key_str.start_with?(prefix) + + stripped_key = key_str.sub(prefix, '') + extracted[stripped_key.to_sym] = value + end +end + From 0e0e88fdf7a4b77858ce07e51b19e01a313bb7eb Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Wed, 17 Sep 2025 11:37:17 +0300 Subject: [PATCH 02/15] Fix dynamic_cast --- .../peerconnection/RTCAudioDeviceModule.mm | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/sdk/objc/api/peerconnection/RTCAudioDeviceModule.mm b/sdk/objc/api/peerconnection/RTCAudioDeviceModule.mm index aad4b2abed..8d64119630 100644 --- a/sdk/objc/api/peerconnection/RTCAudioDeviceModule.mm +++ b/sdk/objc/api/peerconnection/RTCAudioDeviceModule.mm @@ -299,7 +299,7 @@ - (NSInteger)initRecording { - (NSInteger)initAndStartRecording { return _workerThread->BlockingCall([self] { webrtc::AudioEngineDevice *engine_device = - dynamic_cast(_native.get()); + static_cast(_native.get()); if (engine_device != nullptr) { return engine_device->InitAndStartRecording(); } else { @@ -326,7 +326,7 @@ - (BOOL)isRecording { } - (BOOL)isEngineRunning { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return false; return _workerThread->BlockingCall([module] { return module->IsEngineRunning(); }); @@ -344,7 +344,7 @@ - (NSInteger)setMicrophoneMuted:(BOOL)muted { } - (RTC_OBJC_TYPE(RTCAudioEngineState))engineState { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return RTC_OBJC_TYPE(RTCAudioEngineState)(); return _workerThread->BlockingCall([module] { @@ -363,7 +363,7 @@ - (NSInteger)setMicrophoneMuted:(BOOL)muted { } - (void)setEngineState:(RTC_OBJC_TYPE(RTCAudioEngineState))state { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return; _workerThread->BlockingCall([module, state] { @@ -382,7 +382,7 @@ - (void)setEngineState:(RTC_OBJC_TYPE(RTCAudioEngineState))state { #pragma mark - Unique to AudioEngineDevice - (BOOL)isRecordingAlwaysPreparedMode { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return NO; return _workerThread->BlockingCall([module] { @@ -392,7 +392,7 @@ - (BOOL)isRecordingAlwaysPreparedMode { } - (NSInteger)setRecordingAlwaysPreparedMode:(BOOL)enabled { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return -1; return _workerThread->BlockingCall( @@ -400,7 +400,7 @@ - (NSInteger)setRecordingAlwaysPreparedMode:(BOOL)enabled { } - (BOOL)isManualRenderingMode { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return NO; return _workerThread->BlockingCall([module] { @@ -410,7 +410,7 @@ - (BOOL)isManualRenderingMode { } - (NSInteger)setManualRenderingMode:(BOOL)enabled { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return -1; return _workerThread->BlockingCall( @@ -418,7 +418,7 @@ - (NSInteger)setManualRenderingMode:(BOOL)enabled { } - (BOOL)isAdvancedDuckingEnabled { - 
webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return NO; return _workerThread->BlockingCall([module] { @@ -428,7 +428,7 @@ - (BOOL)isAdvancedDuckingEnabled { } - (void)setAdvancedDuckingEnabled:(BOOL)enabled { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return; _workerThread->BlockingCall( @@ -436,7 +436,7 @@ - (void)setAdvancedDuckingEnabled:(BOOL)enabled { } - (NSInteger)duckingLevel { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return 0; return _workerThread->BlockingCall([module] { @@ -446,14 +446,14 @@ - (NSInteger)duckingLevel { } - (void)setDuckingLevel:(NSInteger)value { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return; _workerThread->BlockingCall([module, value] { return module->SetDuckingLevel(value) == 0; }); } - (RTC_OBJC_TYPE(RTCAudioEngineMuteMode))muteMode { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return RTC_OBJC_TYPE(RTCAudioEngineMuteModeUnknown); return _workerThread->BlockingCall([module] { @@ -464,7 +464,7 @@ - (void)setDuckingLevel:(NSInteger)value { } - (NSInteger)setMuteMode:(RTC_OBJC_TYPE(RTCAudioEngineMuteMode))mode { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return -1; return _workerThread->BlockingCall( @@ -472,7 +472,7 @@ - (NSInteger)setMuteMode:(RTC_OBJC_TYPE(RTCAudioEngineMuteMode))mode { } - (BOOL)isVoiceProcessingEnabled { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return NO; return _workerThread->BlockingCall([module] { @@ -482,7 +482,7 @@ - (BOOL)isVoiceProcessingEnabled { } - (NSInteger)setVoiceProcessingEnabled:(BOOL)enabled { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return -1; return _workerThread->BlockingCall( @@ -490,7 +490,7 @@ - (NSInteger)setVoiceProcessingEnabled:(BOOL)enabled { } - (BOOL)isVoiceProcessingBypassed { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return NO; return _workerThread->BlockingCall([module] { @@ -500,7 +500,7 @@ - (BOOL)isVoiceProcessingBypassed { } - (void)setVoiceProcessingBypassed:(BOOL)enabled { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return; _workerThread->BlockingCall( @@ -508,7 +508,7 @@ - (void)setVoiceProcessingBypassed:(BOOL)enabled { } - (BOOL)isVoiceProcessingAGCEnabled { - webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return NO; return _workerThread->BlockingCall([module] { @@ -518,7 +518,7 @@ - (BOOL)isVoiceProcessingAGCEnabled { } - (void)setVoiceProcessingAGCEnabled:(BOOL)enabled { - 
webrtc::AudioEngineDevice *module = dynamic_cast(_native.get()); + webrtc::AudioEngineDevice *module = static_cast(_native.get()); if (module == nullptr) return; _workerThread->BlockingCall( From d416b4082652fefeb7a91e92267f37a3a2658e26 Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Wed, 17 Sep 2025 13:37:10 +0300 Subject: [PATCH 03/15] Fix debug build issue --- sdk/objc/native/src/audio/audio_device_ios.mm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/objc/native/src/audio/audio_device_ios.mm b/sdk/objc/native/src/audio/audio_device_ios.mm index 48f75d5c54..26aa09f861 100644 --- a/sdk/objc/native/src/audio/audio_device_ios.mm +++ b/sdk/objc/native/src/audio/audio_device_ios.mm @@ -758,7 +758,7 @@ static void LogDeviceInfo() { } bool AudioDeviceIOS::RestartAudioUnit(bool enable_input) { - RTC_DCHECK_RUN_ON(&io_thread_checker_); + RTC_DCHECK_RUN_ON(thread_); LOGI() << "RestartAudioUnit"; From 13d1d45807b984cf698ed554e0239bf09d5a8060 Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Fri, 19 Sep 2025 16:22:29 +0300 Subject: [PATCH 04/15] Standalone Flag Plumbing - api/peer_connection_interface.h:1564 adds a standalone_audio_source boolean (defaulting to false) to CreateAudioSource, preserving existing call sites but enabling downstream selection of alternate audio pipelines. - pc/peer_connection_factory.h:76 & pc/peer_connection_factory.cc:200 now accept/forward the flag, with an RTC_DCHECK guarding the yet-to-be-implemented standalone path so behavior stays unchanged. - pc/peer_connection_factory_proxy.h:46 and api/test/mock_peer_connection_factory_interface.h:58 propagate the extra argument through the proxy layer and gMock facade to keep tests and thread marshaling in sync. - sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h:90 & .mm:368-385 expose audioSourceWithConstraints:standalone: on the ObjC API, wiring the new boolean straight to the native factory without touching AudioOptions or media constraints. --- api/peer_connection_interface.h | 3 ++- api/test/mock_peer_connection_factory_interface.h | 2 +- pc/peer_connection_factory.cc | 4 +++- pc/peer_connection_factory.h | 3 ++- pc/peer_connection_factory_proxy.h | 5 +++-- sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h | 5 +++++ sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm | 8 +++++++- 7 files changed, 23 insertions(+), 7 deletions(-) diff --git a/api/peer_connection_interface.h b/api/peer_connection_interface.h index e5b3853e11..d98c4300a2 100644 --- a/api/peer_connection_interface.h +++ b/api/peer_connection_interface.h @@ -1562,7 +1562,8 @@ class RTC_EXPORT PeerConnectionFactoryInterface // Creates an AudioSourceInterface. // `options` decides audio processing settings. virtual scoped_refptr CreateAudioSource( - const AudioOptions& options) = 0; + const AudioOptions& options, + bool standalone_audio_source = false) = 0; // Creates a new local VideoTrack. The same `source` can be used in several // tracks. 
diff --git a/api/test/mock_peer_connection_factory_interface.h b/api/test/mock_peer_connection_factory_interface.h index a297e58435..19f5503683 100644 --- a/api/test/mock_peer_connection_factory_interface.h +++ b/api/test/mock_peer_connection_factory_interface.h @@ -56,7 +56,7 @@ class MockPeerConnectionFactoryInterface (override)); MOCK_METHOD(scoped_refptr, CreateAudioSource, - (const webrtc::AudioOptions&), + (const webrtc::AudioOptions&, bool), (override)); MOCK_METHOD(scoped_refptr, CreateVideoTrack, diff --git a/pc/peer_connection_factory.cc b/pc/peer_connection_factory.cc index e62d25af0b..44088c71b7 100644 --- a/pc/peer_connection_factory.cc +++ b/pc/peer_connection_factory.cc @@ -198,8 +198,10 @@ RtpCapabilities PeerConnectionFactory::GetRtpReceiverCapabilities( } scoped_refptr PeerConnectionFactory::CreateAudioSource( - const AudioOptions& options) { + const AudioOptions& options, bool standalone_audio_source) { RTC_DCHECK(signaling_thread()->IsCurrent()); + RTC_DCHECK(!standalone_audio_source) + << "Standalone audio sources not implemented yet."; scoped_refptr source(LocalAudioSource::Create(&options)); return source; } diff --git a/pc/peer_connection_factory.h b/pc/peer_connection_factory.h index 5b99a69674..9a625f5647 100644 --- a/pc/peer_connection_factory.h +++ b/pc/peer_connection_factory.h @@ -74,7 +74,8 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface { const std::string& stream_id) override; scoped_refptr CreateAudioSource( - const AudioOptions& options) override; + const AudioOptions& options, + bool standalone_audio_source = false) override; scoped_refptr CreateVideoTrack( scoped_refptr video_source, diff --git a/pc/peer_connection_factory_proxy.h b/pc/peer_connection_factory_proxy.h index e046f66377..da5a337729 100644 --- a/pc/peer_connection_factory_proxy.h +++ b/pc/peer_connection_factory_proxy.h @@ -43,9 +43,10 @@ PROXY_CONSTMETHOD1(RtpCapabilities, PROXY_METHOD1(scoped_refptr, CreateLocalMediaStream, const std::string&) -PROXY_METHOD1(scoped_refptr, +PROXY_METHOD2(scoped_refptr, CreateAudioSource, - const AudioOptions&) + const AudioOptions&, + bool) PROXY_METHOD2(scoped_refptr, CreateVideoTrack, scoped_refptr, diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h index 62c7554a54..898a43a5c9 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h @@ -87,6 +87,11 @@ RTC_OBJC_EXPORT - (RTC_OBJC_TYPE(RTCAudioSource) *)audioSourceWithConstraints: (nullable RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints; +/** Initialize an RTCAudioSource with constraints and standalone control. */ +- (RTC_OBJC_TYPE(RTCAudioSource) *)audioSourceWithConstraints: + (nullable RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints + standalone:(BOOL)standalone; + /** Initialize an RTCAudioTrack with an id. Convenience ctor to use an audio source * with no constraints. 
*/ diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm index 791aba1e11..b475a41d7f 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm @@ -367,6 +367,12 @@ - (instancetype)initWithNativeAudioEncoderFactory: - (RTC_OBJC_TYPE(RTCAudioSource) *)audioSourceWithConstraints: (nullable RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints { + return [self audioSourceWithConstraints:constraints standalone:NO]; +} + +- (RTC_OBJC_TYPE(RTCAudioSource) *)audioSourceWithConstraints: + (nullable RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints + standalone:(BOOL)standalone { std::unique_ptr nativeConstraints; if (constraints) { nativeConstraints = constraints.nativeConstraints; @@ -375,7 +381,7 @@ - (instancetype)initWithNativeAudioEncoderFactory: CopyConstraintsIntoAudioOptions(nativeConstraints.get(), &options); webrtc::scoped_refptr source = - _nativeFactory->CreateAudioSource(options); + _nativeFactory->CreateAudioSource(options, standalone); return [[RTC_OBJC_TYPE(RTCAudioSource) alloc] initWithFactory:self nativeAudioSource:source]; } From 7fcf68c2bc5c172cfe3da8938cc06b90a5020b43 Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Fri, 19 Sep 2025 17:19:22 +0300 Subject: [PATCH 05/15] Standalone Source Check-In MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Added the native StandaloneAudioTrackSource implementation (sdk/objc/native/src/standalone_audio_track_source.{h,cc}) plus focused unit coverage (…_unittest.cc). The source keeps start/stop state, fans frames to registered sinks, and exposes a scoped CreateSendStream helper; the test confirms frame delivery, ignores pushes before start, and verifies sink removal. - sdk/BUILD.gn:1908 now only exposes a minimal standalone_audio_track_source library with the direct dependencies it needs and, behind rtc_include_tests, the companion XCTest bundle—no other GN changes remain after your revert. 
- Formatted the new sources with clang-format and ran the required bundle exec fastlane ios build … flow, which completed successfully --- sdk/BUILD.gn | 34 ++++ .../src/standalone_audio_track_source.cc | 105 ++++++++++++ .../src/standalone_audio_track_source.h | 70 ++++++++ .../standalone_audio_track_source_unittest.cc | 149 ++++++++++++++++++ 4 files changed, 358 insertions(+) create mode 100644 sdk/objc/native/src/standalone_audio_track_source.cc create mode 100644 sdk/objc/native/src/standalone_audio_track_source.h create mode 100644 sdk/objc/native/src/standalone_audio_track_source_unittest.cc diff --git a/sdk/BUILD.gn b/sdk/BUILD.gn index 4e38dbec46..c20b579683 100644 --- a/sdk/BUILD.gn +++ b/sdk/BUILD.gn @@ -1908,6 +1908,40 @@ if (is_ios || is_mac) { ] } + rtc_library("standalone_audio_track_source") { + visibility = [ "*" ] + sources = [ + "objc/native/src/standalone_audio_track_source.cc", + "objc/native/src/standalone_audio_track_source.h", + ] + + configs += [ "..:common_objc" ] + + public_configs = [ ":common_config_objc" ] + + deps = [ + "../api:media_stream_interface", + "../api/audio:audio_frame_api", + "../call:call_interfaces", + "../rtc_base:checks", + "../rtc_base/synchronization:mutex", + ] + } + + if (rtc_include_tests) { + rtc_test("standalone_audio_track_source_unittest") { + testonly = true + sources = + [ "objc/native/src/standalone_audio_track_source_unittest.cc" ] + deps = [ + ":standalone_audio_track_source", + "../api:media_stream_interface", + "../api/audio:audio_frame_api", + "../test:test_support", + ] + } + } + rtc_library("video_toolbox_cc") { visibility = [ ":sdk_unittests_sources", diff --git a/sdk/objc/native/src/standalone_audio_track_source.cc b/sdk/objc/native/src/standalone_audio_track_source.cc new file mode 100644 index 0000000000..0d00307a51 --- /dev/null +++ b/sdk/objc/native/src/standalone_audio_track_source.cc @@ -0,0 +1,105 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "sdk/objc/native/src/standalone_audio_track_source.h" + +#include + +namespace webrtc { + +namespace { + +constexpr int kBitsPerSample = 16; + +} // namespace + +StandaloneAudioTrackSource::StandaloneAudioTrackSource() + : state_(MediaSourceInterface::kInitializing), started_(false) {} + +StandaloneAudioTrackSource::~StandaloneAudioTrackSource() = default; + +void StandaloneAudioTrackSource::Start() { + bool expected = false; + if (started_.compare_exchange_strong(expected, true)) { + SetState(MediaSourceInterface::kLive); + } +} + +void StandaloneAudioTrackSource::Stop() { + bool expected = true; + if (started_.compare_exchange_strong(expected, false)) { + SetState(MediaSourceInterface::kEnded); + } +} + +MediaSourceInterface::SourceState StandaloneAudioTrackSource::state() const { + return state_.load(); +} + +void StandaloneAudioTrackSource::AddSink(AudioTrackSinkInterface* sink) { + RTC_DCHECK(sink); + MutexLock lock(&sink_lock_); + RTC_DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end()); + sinks_.push_back(sink); +} + +void StandaloneAudioTrackSource::RemoveSink(AudioTrackSinkInterface* sink) { + RTC_DCHECK(sink); + MutexLock lock(&sink_lock_); + auto it = std::find(sinks_.begin(), sinks_.end(), sink); + if (it != sinks_.end()) { + sinks_.erase(it); + } +} + +void StandaloneAudioTrackSource::PushAudioFrame(const AudioFrame& frame) { + if (!started_.load()) { + return; + } + + MutexLock lock(&sink_lock_); + if (sinks_.empty()) { + return; + } + + const int16_t* audio_data = frame.data(); + const size_t samples_per_channel = frame.samples_per_channel(); + const size_t num_channels = frame.num_channels(); + const int sample_rate = frame.sample_rate_hz(); + RTC_DCHECK(audio_data); + + for (auto* sink : sinks_) { + sink->OnData(audio_data, kBitsPerSample, sample_rate, num_channels, + samples_per_channel, frame.absolute_capture_timestamp_ms()); + } +} + +StandaloneAudioTrackSource::AudioSendStreamPtr +StandaloneAudioTrackSource::CreateSendStream( + Call* call, + const AudioSendStream::Config& config) { + RTC_DCHECK(call); + AudioSendStream* stream = call->CreateAudioSendStream(config); + RTC_CHECK(stream); + return AudioSendStreamPtr(stream, [call](AudioSendStream* to_destroy) { + if (to_destroy) { + call->DestroyAudioSendStream(to_destroy); + } + }); +} + +void StandaloneAudioTrackSource::SetState(SourceState new_state) { + SourceState old_state = state_.exchange(new_state); + if (old_state != new_state) { + FireOnChanged(); + } +} + +} // namespace webrtc diff --git a/sdk/objc/native/src/standalone_audio_track_source.h b/sdk/objc/native/src/standalone_audio_track_source.h new file mode 100644 index 0000000000..fa2368832b --- /dev/null +++ b/sdk/objc/native/src/standalone_audio_track_source.h @@ -0,0 +1,70 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef SDK_OBJC_NATIVE_SRC_STANDALONE_AUDIO_TRACK_SOURCE_H_ +#define SDK_OBJC_NATIVE_SRC_STANDALONE_AUDIO_TRACK_SOURCE_H_ + +#include +#include +#include +#include + +#include "api/audio/audio_frame.h" +#include "api/media_stream_interface.h" +#include "api/notifier.h" +#include "call/audio_send_stream.h" +#include "call/call.h" +#include "rtc_base/checks.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +// StandaloneAudioTrackSource provides a manual audio pipeline surface that can +// be fed with 10 ms PCM frames without relying on AudioTransportImpl. +class StandaloneAudioTrackSource : public Notifier { + public: + StandaloneAudioTrackSource(); + ~StandaloneAudioTrackSource() override; + + void Start(); + void Stop(); + + // MediaSourceInterface implementation. + SourceState state() const override; + bool remote() const override { return false; } + + // AudioSourceInterface implementation. + void AddSink(AudioTrackSinkInterface* sink) override; + void RemoveSink(AudioTrackSinkInterface* sink) override; + + // Allows callers to push 10 ms 16-bit PCM frames directly into the source. + void PushAudioFrame(const AudioFrame& frame); + + using AudioSendStreamPtr = + std::unique_ptr>; + + // Creates a dedicated AudioSendStream associated with the supplied Call. + AudioSendStreamPtr CreateSendStream(Call* call, + const AudioSendStream::Config& config); + + private: + void SetState(SourceState new_state); + + std::atomic state_; + std::atomic started_; + + mutable Mutex sink_lock_; + std::vector sinks_ RTC_GUARDED_BY(sink_lock_); +}; + +} // namespace webrtc + +#endif // SDK_OBJC_NATIVE_SRC_STANDALONE_AUDIO_TRACK_SOURCE_H_ diff --git a/sdk/objc/native/src/standalone_audio_track_source_unittest.cc b/sdk/objc/native/src/standalone_audio_track_source_unittest.cc new file mode 100644 index 0000000000..129be28050 --- /dev/null +++ b/sdk/objc/native/src/standalone_audio_track_source_unittest.cc @@ -0,0 +1,149 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "sdk/objc/native/src/standalone_audio_track_source.h" + +#include +#include +#include + +#include "api/audio/audio_frame.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +constexpr int kSampleRateHz = 48000; +constexpr size_t kChannels = 1; +constexpr int64_t kCaptureTimestampMs = 1234; + +class RecordingSink : public AudioTrackSinkInterface { + public: + void OnData(const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + std::optional absolute_capture_timestamp_ms) override { + ++callback_count_; + bits_per_sample_ = bits_per_sample; + sample_rate_ = sample_rate; + channels_ = number_of_channels; + frames_ = number_of_frames; + capture_timestamp_ = absolute_capture_timestamp_ms; + + const int16_t* typed_data = static_cast(audio_data); + size_t sample_count = number_of_channels * number_of_frames; + last_data_.assign(typed_data, typed_data + sample_count); + } + + void Reset() { + callback_count_ = 0; + bits_per_sample_ = 0; + sample_rate_ = 0; + channels_ = 0; + frames_ = 0; + capture_timestamp_.reset(); + last_data_.clear(); + } + + int callback_count() const { return callback_count_; } + int bits_per_sample() const { return bits_per_sample_; } + int sample_rate() const { return sample_rate_; } + size_t channels() const { return channels_; } + size_t frames() const { return frames_; } + const std::vector& last_data() const { return last_data_; } + const std::optional& capture_timestamp() const { + return capture_timestamp_; + } + + private: + int callback_count_ = 0; + int bits_per_sample_ = 0; + int sample_rate_ = 0; + size_t channels_ = 0; + size_t frames_ = 0; + std::optional capture_timestamp_; + std::vector last_data_; +}; + +AudioFrame CreateTestFrame() { + AudioFrame frame; + frame.SetSampleRateAndChannelSize(kSampleRateHz); + const size_t samples_per_channel = frame.samples_per_channel(); + + std::vector payload(samples_per_channel * kChannels); + for (size_t i = 0; i < payload.size(); ++i) { + payload[i] = static_cast(i & 0xFF); + } + frame.UpdateFrame(/*timestamp=*/0, payload.data(), samples_per_channel, + kSampleRateHz, AudioFrame::kNormalSpeech, + AudioFrame::kVadActive, kChannels); + frame.set_absolute_capture_timestamp_ms(kCaptureTimestampMs); + return frame; +} + +TEST(StandaloneAudioTrackSourceTest, DeliversFramesToRegisteredSink) { + StandaloneAudioTrackSource source; + RecordingSink sink; + + source.AddSink(&sink); + source.Start(); + + AudioFrame frame = CreateTestFrame(); + source.PushAudioFrame(frame); + + EXPECT_EQ(MediaSourceInterface::kLive, source.state()); + ASSERT_EQ(1, sink.callback_count()); + EXPECT_EQ(16, sink.bits_per_sample()); + EXPECT_EQ(kSampleRateHz, sink.sample_rate()); + EXPECT_EQ(kChannels, sink.channels()); + EXPECT_EQ(frame.samples_per_channel(), sink.frames()); + + std::vector expected(frame.samples_per_channel() * kChannels); + for (size_t i = 0; i < expected.size(); ++i) { + expected[i] = static_cast(i & 0xFF); + } + EXPECT_EQ(expected, sink.last_data()); + ASSERT_TRUE(sink.capture_timestamp().has_value()); + EXPECT_EQ(kCaptureTimestampMs, sink.capture_timestamp().value()); + + source.Stop(); + EXPECT_EQ(MediaSourceInterface::kEnded, source.state()); +} + +TEST(StandaloneAudioTrackSourceTest, IgnoresPushWhenNotStarted) { + StandaloneAudioTrackSource source; + RecordingSink sink; + source.AddSink(&sink); + + AudioFrame frame = CreateTestFrame(); + source.PushAudioFrame(frame); + EXPECT_EQ(0, sink.callback_count()); + 
EXPECT_EQ(MediaSourceInterface::kInitializing, source.state()); +} + +TEST(StandaloneAudioTrackSourceTest, RemoveSinkStopsDelivery) { + StandaloneAudioTrackSource source; + RecordingSink sink; + source.AddSink(&sink); + source.Start(); + + AudioFrame frame = CreateTestFrame(); + source.PushAudioFrame(frame); + ASSERT_EQ(1, sink.callback_count()); + + source.RemoveSink(&sink); + source.PushAudioFrame(frame); + EXPECT_EQ(1, sink.callback_count()); +} + +} // namespace +} // namespace webrtc From 96c2ad3cbff844f15bc26f03f831d71bee93608d Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Fri, 19 Sep 2025 17:55:05 +0300 Subject: [PATCH 06/15] Bridge Added - Introduced RTCAudioFrame (sdk/objc/api/peerconnection/RTCAudioFrame.h, .mm) to wrap raw 16-bit PCM payloads along with metadata (sample rate, channels, frame count, timestamps) for Objective-C callers. - Added RTCStandaloneAudioSource (sdk/objc/api/peerconnection/RTCStandaloneAudioSource.h, .mm, plus RTCStandaloneAudioSource+Private.h) to build on RTCAudioSource, own the native StandaloneAudioTrackSource, expose start/stop, and forward RTCAudioFrame data through PushAudioFrame. - Updated sdk/BUILD.gn:1042,1208,1473 so the new bridge files are compiled, the umbrella exports the headers, and the ObjC peerconnection library links our existing native standalone_audio_track_source target. --- fastlane/Fastfile | 2 + sdk/BUILD.gn | 8 ++ sdk/objc/api/peerconnection/RTCAudioFrame.h | 40 ++++++++++ sdk/objc/api/peerconnection/RTCAudioFrame.mm | 74 ++++++++++++++++++ .../RTCStandaloneAudioSource+Private.h | 29 +++++++ .../peerconnection/RTCStandaloneAudioSource.h | 33 ++++++++ .../RTCStandaloneAudioSource.mm | 76 +++++++++++++++++++ 7 files changed, 262 insertions(+) create mode 100644 sdk/objc/api/peerconnection/RTCAudioFrame.h create mode 100644 sdk/objc/api/peerconnection/RTCAudioFrame.mm create mode 100644 sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h create mode 100644 sdk/objc/api/peerconnection/RTCStandaloneAudioSource.h create mode 100644 sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm diff --git a/fastlane/Fastfile b/fastlane/Fastfile index 52b91dd388..b8ef179df1 100644 --- a/fastlane/Fastfile +++ b/fastlane/Fastfile @@ -1,3 +1,5 @@ +skip_docs + require 'json' require 'net/http' require 'fileutils' diff --git a/sdk/BUILD.gn b/sdk/BUILD.gn index c20b579683..41a0454293 100644 --- a/sdk/BUILD.gn +++ b/sdk/BUILD.gn @@ -1042,9 +1042,14 @@ if (is_ios || is_mac) { "objc/api/peerconnection/RTCAudioDeviceModule.mm", "objc/api/peerconnection/RTCIODevice.h", "objc/api/peerconnection/RTCIODevice.mm", + "objc/api/peerconnection/RTCAudioFrame.h", + "objc/api/peerconnection/RTCAudioFrame.mm", "objc/api/peerconnection/RTCAudioSource+Private.h", "objc/api/peerconnection/RTCAudioSource.h", "objc/api/peerconnection/RTCAudioSource.mm", + "objc/api/peerconnection/RTCStandaloneAudioSource+Private.h", + "objc/api/peerconnection/RTCStandaloneAudioSource.h", + "objc/api/peerconnection/RTCStandaloneAudioSource.mm", "objc/api/peerconnection/RTCAudioTrack+Private.h", "objc/api/peerconnection/RTCAudioTrack.h", "objc/api/peerconnection/RTCAudioTrack.mm", @@ -1203,6 +1208,7 @@ if (is_ios || is_mac) { ":audiorendereradapter_objc", ":videosource_objc", ":videotoolbox_objc", + ":standalone_audio_track_source", "../api/crypto:frame_crypto_transformer", "../api:dtmf_sender_interface", "../api:enable_media", @@ -1467,7 +1473,9 @@ if (is_ios || is_mac) { "objc/helpers/UIDevice+RTCDevice.h", "objc/api/peerconnection/RTCAudioDeviceModule.h", 
"objc/api/peerconnection/RTCIODevice.h", + "objc/api/peerconnection/RTCAudioFrame.h", "objc/api/peerconnection/RTCAudioSource.h", + "objc/api/peerconnection/RTCStandaloneAudioSource.h", "objc/api/peerconnection/RTCAudioTrack.h", "objc/api/peerconnection/RTCConfiguration.h", "objc/api/peerconnection/RTCDataChannel.h", diff --git a/sdk/objc/api/peerconnection/RTCAudioFrame.h b/sdk/objc/api/peerconnection/RTCAudioFrame.h new file mode 100644 index 0000000000..4d15dce2fd --- /dev/null +++ b/sdk/objc/api/peerconnection/RTCAudioFrame.h @@ -0,0 +1,40 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#import + +#import "sdk/objc/base/RTCMacros.h" + +NS_ASSUME_NONNULL_BEGIN + +RTC_OBJC_EXPORT +@interface RTC_OBJC_TYPE(RTCAudioFrame) : NSObject + +- (instancetype)init NS_UNAVAILABLE; + +- (instancetype)initWithData:(NSData *)data + sampleRateHz:(int)sampleRateHz + channels:(NSUInteger)channels + framesPerChannel:(NSUInteger)framesPerChannel + timestamp:(uint32_t)timestamp + absoluteCaptureTimestampMs:(nullable NSNumber *)absoluteCaptureTimestampMs + NS_DESIGNATED_INITIALIZER; + +@property(nonatomic, readonly) NSData *data; +@property(nonatomic, readonly) int sampleRateHz; +@property(nonatomic, readonly) NSUInteger channels; +@property(nonatomic, readonly) NSUInteger framesPerChannel; +@property(nonatomic, readonly) uint32_t timestamp; +@property(nonatomic, readonly, nullable) + NSNumber *absoluteCaptureTimestampMs; + +@end + +NS_ASSUME_NONNULL_END diff --git a/sdk/objc/api/peerconnection/RTCAudioFrame.mm b/sdk/objc/api/peerconnection/RTCAudioFrame.mm new file mode 100644 index 0000000000..eb4226c432 --- /dev/null +++ b/sdk/objc/api/peerconnection/RTCAudioFrame.mm @@ -0,0 +1,74 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#import "RTCAudioFrame.h" + +static inline size_t BytesForFrame(NSUInteger channels, + NSUInteger framesPerChannel) { + return channels * framesPerChannel * sizeof(int16_t); +} + +@implementation RTC_OBJC_TYPE(RTCAudioFrame) { + NSData *_data; + int _sampleRateHz; + NSUInteger _channels; + NSUInteger _framesPerChannel; + uint32_t _timestamp; + NSNumber *_absoluteCaptureTimestampMs; +} + +- (instancetype)initWithData:(NSData *)data + sampleRateHz:(int)sampleRateHz + channels:(NSUInteger)channels + framesPerChannel:(NSUInteger)framesPerChannel + timestamp:(uint32_t)timestamp + absoluteCaptureTimestampMs:(NSNumber *)absoluteCaptureTimestampMs { + NSParameterAssert(sampleRateHz > 0); + NSParameterAssert(channels > 0); + NSParameterAssert(framesPerChannel > 0); + NSParameterAssert(data.length == BytesForFrame(channels, framesPerChannel)); + + self = [super init]; + if (self) { + _data = [data copy]; + _sampleRateHz = sampleRateHz; + _channels = channels; + _framesPerChannel = framesPerChannel; + _timestamp = timestamp; + _absoluteCaptureTimestampMs = absoluteCaptureTimestampMs; + } + return self; +} + +- (NSData *)data { + return _data; +} + +- (int)sampleRateHz { + return _sampleRateHz; +} + +- (NSUInteger)channels { + return _channels; +} + +- (NSUInteger)framesPerChannel { + return _framesPerChannel; +} + +- (uint32_t)timestamp { + return _timestamp; +} + +- (NSNumber *)absoluteCaptureTimestampMs { + return _absoluteCaptureTimestampMs; +} + +@end diff --git a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h new file mode 100644 index 0000000000..d4bc9f0565 --- /dev/null +++ b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h @@ -0,0 +1,29 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#import "RTCStandaloneAudioSource.h" + +#include "api/scoped_refptr.h" + +namespace webrtc { +class StandaloneAudioTrackSource; +} // namespace webrtc + +NS_ASSUME_NONNULL_BEGIN + +@interface RTC_OBJC_TYPE(RTCStandaloneAudioSource) () + +@property(nonatomic, readonly) + rtc::scoped_refptr + nativeStandaloneSource; + +@end + +NS_ASSUME_NONNULL_END diff --git a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.h b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.h new file mode 100644 index 0000000000..41b97e7c4e --- /dev/null +++ b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.h @@ -0,0 +1,33 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#import "RTCAudioSource.h" + +NS_ASSUME_NONNULL_BEGIN + +@class RTC_OBJC_TYPE(RTCAudioFrame); +@class RTC_OBJC_TYPE(RTCPeerConnectionFactory); + +RTC_OBJC_EXPORT +@interface RTC_OBJC_TYPE(RTCStandaloneAudioSource) : RTC_OBJC_TYPE(RTCAudioSource) + +- (instancetype)init NS_UNAVAILABLE; + +- (instancetype)initWithFactory: + (RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory; + +- (void)start; +- (void)stop; + +- (void)pushAudioFrame:(RTC_OBJC_TYPE(RTCAudioFrame) *)frame; + +@end + +NS_ASSUME_NONNULL_END diff --git a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm new file mode 100644 index 0000000000..d50958f135 --- /dev/null +++ b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm @@ -0,0 +1,76 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#import "RTCStandaloneAudioSource+Private.h" + +#import "RTCAudioFrame.h" +#import "RTCAudioSource+Private.h" +#import "RTCPeerConnectionFactory+Private.h" + +#include + +#include "api/audio/audio_frame.h" +#include "api/make_ref_counted.h" +#include "rtc_base/checks.h" +#include "sdk/objc/native/src/standalone_audio_track_source.h" + +namespace { + +using NativeStandaloneSource = webrtc::StandaloneAudioTrackSource; + +} // namespace + +@implementation RTC_OBJC_TYPE(RTCStandaloneAudioSource) { + rtc::scoped_refptr _nativeStandaloneSource; +} + +@synthesize nativeStandaloneSource = _nativeStandaloneSource; + +- (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory { + RTC_DCHECK(factory); + auto native_source = rtc::make_ref_counted(); + self = [super initWithFactory:factory nativeAudioSource:native_source]; + if (self) { + _nativeStandaloneSource = std::move(native_source); + } + return self; +} + +- (void)start { + _nativeStandaloneSource->Start(); +} + +- (void)stop { + _nativeStandaloneSource->Stop(); +} + +- (void)pushAudioFrame:(RTC_OBJC_TYPE(RTCAudioFrame) *)frame { + if (!frame) { + return; + } + + webrtc::AudioFrame native_frame; + native_frame.SetSampleRateAndChannelSize(frame.sampleRateHz); + + native_frame.UpdateFrame(frame.timestamp, + static_cast(frame.data.bytes), + frame.framesPerChannel, frame.sampleRateHz, + webrtc::AudioFrame::kNormalSpeech, + webrtc::AudioFrame::kVadUnknown, frame.channels); + + if (frame.absoluteCaptureTimestampMs != nil) { + native_frame.set_absolute_capture_timestamp_ms( + frame.absoluteCaptureTimestampMs.longLongValue); + } + + _nativeStandaloneSource->PushAudioFrame(native_frame); +} + +@end From fcea5cfd9cda57fa0d7e91997f33a7fa16b627ee Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Fri, 19 Sep 2025 18:26:51 +0300 Subject: [PATCH 07/15] Standalone Helper - Added StandaloneAudioSendHelper (sdk/objc/native/src/standalone_audio_send_helper.h, .cc) to own the per-source AudioSendStream, expose voe::ChannelSendInterface/RtpRtcpInterface, and tear it down safely with RAII. - Hooked StandaloneAudioTrackSource::CreateSendStream to return the new helper (sdk/objc/native/src/standalone_audio_track_source.h, .cc), keeping existing frame push logic intact. 
- Expanded the unit test suite with StandaloneAudioSendHelperTest, building a minimal Call/AudioState scaffold and verifying the helper returns valid stream, channel, and RTP handles (sdk/objc/native/src/standalone_audio_track_source_unittest.cc). - Updated GN wiring so both the helper and test compile and link with the needed call/audio-mixer/device components (sdk/BUILD.gn). --- sdk/BUILD.gn | 7 +++ .../src/standalone_audio_send_helper.cc | 42 +++++++++++++++ .../native/src/standalone_audio_send_helper.h | 54 +++++++++++++++++++ .../src/standalone_audio_track_source.cc | 12 ++--- .../src/standalone_audio_track_source.h | 6 +-- .../standalone_audio_track_source_unittest.cc | 52 ++++++++++++++++++ 6 files changed, 163 insertions(+), 10 deletions(-) create mode 100644 sdk/objc/native/src/standalone_audio_send_helper.cc create mode 100644 sdk/objc/native/src/standalone_audio_send_helper.h diff --git a/sdk/BUILD.gn b/sdk/BUILD.gn index 41a0454293..c131fb26e7 100644 --- a/sdk/BUILD.gn +++ b/sdk/BUILD.gn @@ -1919,6 +1919,8 @@ if (is_ios || is_mac) { rtc_library("standalone_audio_track_source") { visibility = [ "*" ] sources = [ + "objc/native/src/standalone_audio_send_helper.cc", + "objc/native/src/standalone_audio_send_helper.h", "objc/native/src/standalone_audio_track_source.cc", "objc/native/src/standalone_audio_track_source.h", ] @@ -1945,6 +1947,11 @@ if (is_ios || is_mac) { ":standalone_audio_track_source", "../api:media_stream_interface", "../api/audio:audio_frame_api", + "../api/audio:builtin_audio_processing_builder", + "../api/environment:environment_factory", + "../call:call", + "../modules/audio_device:audio_device", + "../modules/audio_mixer:audio_mixer_impl", "../test:test_support", ] } diff --git a/sdk/objc/native/src/standalone_audio_send_helper.cc b/sdk/objc/native/src/standalone_audio_send_helper.cc new file mode 100644 index 0000000000..88246885af --- /dev/null +++ b/sdk/objc/native/src/standalone_audio_send_helper.cc @@ -0,0 +1,42 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "sdk/objc/native/src/standalone_audio_send_helper.h" + +#include + +#include "audio/audio_send_stream.h" +#include "audio/channel_send.h" +#include "call/call.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +StandaloneAudioSendHelper::StandaloneAudioSendHelper( + Call* call, + const AudioSendStream::Config& config) + : call_(call) { + RTC_DCHECK(call_); + audio_send_stream_ = call_->CreateAudioSendStream(config); + RTC_CHECK(audio_send_stream_); + + auto* internal_stream = + static_cast(audio_send_stream_); + channel_send_ = internal_stream->GetChannel(); + rtp_rtcp_ = channel_send_ ? channel_send_->GetRtpRtcp() : nullptr; +} + +StandaloneAudioSendHelper::~StandaloneAudioSendHelper() { + if (call_ && audio_send_stream_) { + call_->DestroyAudioSendStream(audio_send_stream_); + } +} + +} // namespace webrtc diff --git a/sdk/objc/native/src/standalone_audio_send_helper.h b/sdk/objc/native/src/standalone_audio_send_helper.h new file mode 100644 index 0000000000..4ffa40d5b6 --- /dev/null +++ b/sdk/objc/native/src/standalone_audio_send_helper.h @@ -0,0 +1,54 @@ +/* + * Copyright 2024 The WebRTC project authors. 
All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef SDK_OBJC_NATIVE_SRC_STANDALONE_AUDIO_SEND_HELPER_H_ +#define SDK_OBJC_NATIVE_SRC_STANDALONE_AUDIO_SEND_HELPER_H_ + +#include + +#include "call/audio_send_stream.h" + +namespace webrtc { + +class Call; +class RtpRtcpInterface; + +namespace voe { +class ChannelSendInterface; +} // namespace voe + +// RAII wrapper around a dedicated AudioSendStream created for standalone audio +// sources. Instances manage stream lifetime and provide access to the +// underlying ChannelSend and RtpRtcp interfaces for integration with higher +// layers. +class StandaloneAudioSendHelper { + public: + StandaloneAudioSendHelper(Call* call, + const AudioSendStream::Config& config); + ~StandaloneAudioSendHelper(); + + StandaloneAudioSendHelper(const StandaloneAudioSendHelper&) = delete; + StandaloneAudioSendHelper& operator=(const StandaloneAudioSendHelper&) = + delete; + + AudioSendStream* audio_send_stream() const { return audio_send_stream_; } + const voe::ChannelSendInterface* channel_send() const { return channel_send_; } + RtpRtcpInterface* rtp_rtcp() const { return rtp_rtcp_; } + + private: + Call* call_ = nullptr; + AudioSendStream* audio_send_stream_ = nullptr; + const voe::ChannelSendInterface* channel_send_ = nullptr; + RtpRtcpInterface* rtp_rtcp_ = nullptr; +}; + +} // namespace webrtc + +#endif // SDK_OBJC_NATIVE_SRC_STANDALONE_AUDIO_SEND_HELPER_H_ diff --git a/sdk/objc/native/src/standalone_audio_track_source.cc b/sdk/objc/native/src/standalone_audio_track_source.cc index 0d00307a51..60ddf1a76f 100644 --- a/sdk/objc/native/src/standalone_audio_track_source.cc +++ b/sdk/objc/native/src/standalone_audio_track_source.cc @@ -12,6 +12,8 @@ #include +#include "sdk/objc/native/src/standalone_audio_send_helper.h" + namespace webrtc { namespace { @@ -86,13 +88,9 @@ StandaloneAudioTrackSource::CreateSendStream( Call* call, const AudioSendStream::Config& config) { RTC_DCHECK(call); - AudioSendStream* stream = call->CreateAudioSendStream(config); - RTC_CHECK(stream); - return AudioSendStreamPtr(stream, [call](AudioSendStream* to_destroy) { - if (to_destroy) { - call->DestroyAudioSendStream(to_destroy); - } - }); + auto helper = std::make_unique(call, config); + RTC_CHECK(helper->audio_send_stream()); + return helper; } void StandaloneAudioTrackSource::SetState(SourceState new_state) { diff --git a/sdk/objc/native/src/standalone_audio_track_source.h b/sdk/objc/native/src/standalone_audio_track_source.h index fa2368832b..d5277c8ade 100644 --- a/sdk/objc/native/src/standalone_audio_track_source.h +++ b/sdk/objc/native/src/standalone_audio_track_source.h @@ -27,6 +27,8 @@ namespace webrtc { +class StandaloneAudioSendHelper; + // StandaloneAudioTrackSource provides a manual audio pipeline surface that can // be fed with 10 ms PCM frames without relying on AudioTransportImpl. class StandaloneAudioTrackSource : public Notifier { @@ -48,10 +50,8 @@ class StandaloneAudioTrackSource : public Notifier { // Allows callers to push 10 ms 16-bit PCM frames directly into the source. void PushAudioFrame(const AudioFrame& frame); - using AudioSendStreamPtr = - std::unique_ptr>; - // Creates a dedicated AudioSendStream associated with the supplied Call. 
+ using AudioSendStreamPtr = std::unique_ptr; AudioSendStreamPtr CreateSendStream(Call* call, const AudioSendStream::Config& config); diff --git a/sdk/objc/native/src/standalone_audio_track_source_unittest.cc b/sdk/objc/native/src/standalone_audio_track_source_unittest.cc index 129be28050..dd084f1a09 100644 --- a/sdk/objc/native/src/standalone_audio_track_source_unittest.cc +++ b/sdk/objc/native/src/standalone_audio_track_source_unittest.cc @@ -12,9 +12,20 @@ #include #include +#include #include #include "api/audio/audio_frame.h" +#include "api/audio/builtin_audio_processing_builder.h" +#include "api/audio_codecs/audio_format.h" +#include "api/make_ref_counted.h" +#include "api/environment/environment_factory.h" +#include "call/audio_state.h" +#include "call/call.h" +#include "call/call_config.h" +#include "modules/audio_device/include/fake_audio_device.h" +#include "modules/audio_mixer/audio_mixer_impl.h" +#include "sdk/objc/native/src/standalone_audio_send_helper.h" #include "test/gtest.h" namespace webrtc { @@ -145,5 +156,46 @@ TEST(StandaloneAudioTrackSourceTest, RemoveSinkStopsDelivery) { EXPECT_EQ(1, sink.callback_count()); } +class NullTransport : public Transport { + public: + bool SendRtp(const uint8_t* /*packet*/, size_t /*length*/, + const PacketOptions& /*options*/) override { + return true; + } + + bool SendRtcp(const uint8_t* /*packet*/, size_t /*length*/) override { + return true; + } +}; + +TEST(StandaloneAudioSendHelperTest, CreatesStreamAndExposesInterfaces) { + const Environment env = CreateEnvironment(); + + AudioState::Config audio_state_config; + audio_state_config.audio_mixer = AudioMixerImpl::Create(); + audio_state_config.audio_processing = + BuiltinAudioProcessingBuilder().Create(); + audio_state_config.audio_device_module = + make_ref_counted(); + + auto audio_state = AudioState::Create(audio_state_config); + + CallConfig call_config(env); + call_config.audio_state = audio_state; + std::unique_ptr call = Call::Create(std::move(call_config)); + + NullTransport transport; + AudioSendStream::Config config(&transport); + config.rtp.ssrc = 1234; + + StandaloneAudioTrackSource source; + auto helper = source.CreateSendStream(call.get(), config); + + ASSERT_NE(helper, nullptr); + EXPECT_NE(helper->audio_send_stream(), nullptr); + EXPECT_NE(helper->channel_send(), nullptr); + EXPECT_NE(helper->rtp_rtcp(), nullptr); +} + } // namespace } // namespace webrtc From 950656926b1d83ede86c9cf90b9d5ed499fd67d4 Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Fri, 19 Sep 2025 19:29:35 +0300 Subject: [PATCH 08/15] Changes - Updated the factory branching so PeerConnectionFactory::CreateAudioSource now returns StandaloneAudioTrackSource when the standalone flag is set on iOS, while leaving other platforms on the legacy path (pc/peer_connection_factory.cc:204). - Wired the ObjC factory to import the standalone bridge, detect standalone requests, and wrap the returned native source in RTCStandaloneAudioSource, including a safe cast to the native implementation (sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm:11, sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm:386). - Added a private initializer so RTCStandaloneAudioSource can take an existing native standalone source, and refactored the designated initializer to share that path (sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h:27, sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm:36). 
- Taught the GN target to pull in the standalone audio source library when building for iOS, satisfying the new dependency (pc/BUILD.gn:1577). --- pc/BUILD.gn | 3 +++ pc/peer_connection_factory.cc | 11 +++++++++++ .../peerconnection/RTCPeerConnectionFactory.mm | 14 ++++++++++++++ .../RTCStandaloneAudioSource+Private.h | 5 +++++ .../peerconnection/RTCStandaloneAudioSource.mm | 16 ++++++++++++---- 5 files changed, 45 insertions(+), 4 deletions(-) diff --git a/pc/BUILD.gn b/pc/BUILD.gn index d6b0acbe56..8956a09cd0 100644 --- a/pc/BUILD.gn +++ b/pc/BUILD.gn @@ -1574,6 +1574,9 @@ rtc_source_set("peer_connection_factory") { "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/strings:string_view", ] + if (is_ios) { + deps += [ "../sdk:standalone_audio_track_source" ] + } } rtc_library("peer_connection_message_handler") { diff --git a/pc/peer_connection_factory.cc b/pc/peer_connection_factory.cc index 44088c71b7..b5a0da1bf5 100644 --- a/pc/peer_connection_factory.cc +++ b/pc/peer_connection_factory.cc @@ -66,6 +66,10 @@ #include "rtc_base/rtc_certificate_generator.h" #include "rtc_base/system/file_wrapper.h" +#if defined(WEBRTC_IOS) +#include "sdk/objc/native/src/standalone_audio_track_source.h" +#endif // defined(WEBRTC_IOS) + namespace webrtc { scoped_refptr @@ -200,8 +204,15 @@ RtpCapabilities PeerConnectionFactory::GetRtpReceiverCapabilities( scoped_refptr PeerConnectionFactory::CreateAudioSource( const AudioOptions& options, bool standalone_audio_source) { RTC_DCHECK(signaling_thread()->IsCurrent()); +#if defined(WEBRTC_IOS) + if (standalone_audio_source) { + return make_ref_counted(); + } +#else RTC_DCHECK(!standalone_audio_source) << "Standalone audio sources not implemented yet."; +#endif + scoped_refptr source(LocalAudioSource::Create(&options)); return source; } diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm index b475a41d7f..9ba7e68a0e 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm @@ -9,6 +9,7 @@ */ #include +#include #import "RTCPeerConnectionFactory+Native.h" #import "RTCPeerConnectionFactory+Private.h" @@ -22,6 +23,7 @@ #import "RTCAudioTrack+Private.h" #import "RTCMediaConstraints+Private.h" #import "RTCMediaStream+Private.h" +#import "RTCStandaloneAudioSource+Private.h" #import "RTCPeerConnection+Private.h" #import "RTCVideoSource+Private.h" #import "RTCVideoTrack+Private.h" @@ -57,6 +59,7 @@ #include "sdk/objc/native/api/video_encoder_factory.h" #include "sdk/objc/native/src/objc_video_decoder_factory.h" #include "sdk/objc/native/src/objc_video_encoder_factory.h" +#include "sdk/objc/native/src/standalone_audio_track_source.h" #import "components/audio/RTCAudioProcessingModule.h" #import "components/audio/RTCDefaultAudioProcessingModule+Private.h" @@ -382,6 +385,17 @@ - (instancetype)initWithNativeAudioEncoderFactory: webrtc::scoped_refptr source = _nativeFactory->CreateAudioSource(options, standalone); + if (standalone) { + auto* standalone_ptr = + static_cast(source.get()); + RTC_CHECK(standalone_ptr); + rtc::scoped_refptr standalone_source( + standalone_ptr); + return [[RTC_OBJC_TYPE(RTCStandaloneAudioSource) alloc] + initWithFactory:self + nativeStandaloneSource:std::move(standalone_source)]; + } + return [[RTC_OBJC_TYPE(RTCAudioSource) alloc] initWithFactory:self nativeAudioSource:source]; } diff --git a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h 
b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h
index d4bc9f0565..e2ede641de 100644
--- a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h
+++ b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource+Private.h
@@ -24,6 +24,11 @@ NS_ASSUME_NONNULL_BEGIN
     rtc::scoped_refptr<webrtc::StandaloneAudioTrackSource>
         nativeStandaloneSource;
 
+- (instancetype)initWithFactory:
+        (RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory
+    nativeStandaloneSource:(rtc::scoped_refptr<webrtc::StandaloneAudioTrackSource>)
+                               nativeStandaloneSource;
+
 @end
 
 NS_ASSUME_NONNULL_END
diff --git a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm
index d50958f135..b0790ea979 100644
--- a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm
+++ b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm
@@ -33,16 +33,24 @@ @implementation RTC_OBJC_TYPE(RTCStandaloneAudioSource) {
 
 @synthesize nativeStandaloneSource = _nativeStandaloneSource;
 
-- (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory {
+- (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory
+         nativeStandaloneSource:
+             (rtc::scoped_refptr<webrtc::StandaloneAudioTrackSource>)nativeStandaloneSource {
   RTC_DCHECK(factory);
-  auto native_source = rtc::make_ref_counted<NativeStandaloneSource>();
-  self = [super initWithFactory:factory nativeAudioSource:native_source];
+  RTC_DCHECK(nativeStandaloneSource);
+  self = [super initWithFactory:factory nativeAudioSource:nativeStandaloneSource];
   if (self) {
-    _nativeStandaloneSource = std::move(native_source);
+    _nativeStandaloneSource = std::move(nativeStandaloneSource);
   }
   return self;
 }
 
+- (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory {
+  RTC_DCHECK(factory);
+  auto native_source = rtc::make_ref_counted<NativeStandaloneSource>();
+  return [self initWithFactory:factory nativeStandaloneSource:native_source];
+}
+
 - (void)start {
   _nativeStandaloneSource->Start();
 }

From 7c179259b798d4625d640bb9c4f478e8aa21c72a Mon Sep 17 00:00:00 2001
From: Ilias Pavlidakis
Date: Sat, 20 Sep 2025 11:06:03 +0300
Subject: [PATCH 09/15] Update AudioRtpSender so it binds to standalone sources
 directly while preserving the legacy AudioTransportImpl path; add tests
 covering both source types. Run the Fastlane command afterward.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Added AudioSourceInterface::is_standalone() and implemented it in the iOS
  standalone source so we can detect standalone producers without RTTI
  (api/media_stream_interface.h:266-273,
  sdk/objc/native/src/standalone_audio_track_source.h:42-56).
- On iOS the audio sender now swaps to a StandaloneAudioFrameForwarder that
  pushes 10 ms PCM straight into the per-SSRC AudioSendStream, while legacy
  sources continue through the existing sink adapter (pc/rtp_sender.h:343-441,
  pc/rtp_sender.cc:680-744, pc/rtp_sender.cc:819-965).
- Voice-channel plumbing exposes GetAudioSendStream and the fake call gains a
  send-frame counter, letting the standalone path drive real streams in both
  production and tests (media/base/media_channel.h:889-903,
  media/base/fake_media_engine.cc:277-349,
  media/engine/webrtc_voice_engine.cc:867-1874,
  media/engine/fake_webrtc_call.h:74-117).
- RtpSenderReceiver tests now accept arbitrary audio sources and include an
  iOS-only regression that pushes PCM through StandaloneAudioTrackSource,
  asserting the fake send stream records delivery
  (pc/rtp_sender_receiver_unittest.cc:195-210,
  pc/rtp_sender_receiver_unittest.cc:345-357,
  pc/rtp_sender_receiver_unittest.cc:602-626).
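
For reference, the intended push path looks roughly like the sketch below. It
mirrors the new iOS-only regression test rather than production wiring; the
48 kHz mono buffer, the zeroed samples, and the timestamp are illustrative
values, and the snippet assumes the relevant headers (<array>,
api/audio/audio_frame.h, api/make_ref_counted.h, and the standalone source
header) are included:

    auto source = rtc::make_ref_counted<webrtc::StandaloneAudioTrackSource>();
    source->Start();

    constexpr int kSampleRateHz = 48000;
    constexpr size_t kNumChannels = 1;
    constexpr size_t kFramesPer10Ms = kSampleRateHz / 100;  // One 10 ms chunk.
    std::array<int16_t, kFramesPer10Ms * kNumChannels> samples{};  // Silence.

    webrtc::AudioFrame frame;
    frame.UpdateFrame(/*timestamp=*/0, samples.data(), kFramesPer10Ms,
                      kSampleRateHz, webrtc::AudioFrame::kNormalSpeech,
                      webrtc::AudioFrame::kVadUnknown, kNumChannels);

    // With the owning track attached to an AudioRtpSender, the
    // StandaloneAudioFrameForwarder relays each pushed 10 ms frame into the
    // per-SSRC AudioSendStream via SendAudioData().
    source->PushAudioFrame(frame);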
--- api/media_stream_interface.h | 4 + media/base/fake_media_engine.cc | 31 ++++ media/base/fake_media_engine.h | 13 ++ media/base/media_channel.h | 8 + media/engine/fake_webrtc_call.h | 6 +- media/engine/webrtc_voice_engine.cc | 15 ++ media/engine/webrtc_voice_engine.h | 13 +- pc/rtp_sender.cc | 140 ++++++++++++++++++ pc/rtp_sender.h | 35 +++++ pc/rtp_sender_receiver_unittest.cc | 45 +++++- .../src/standalone_audio_track_source.h | 1 + 11 files changed, 304 insertions(+), 7 deletions(-) diff --git a/api/media_stream_interface.h b/api/media_stream_interface.h index adc0b6825c..11c4d55db7 100644 --- a/api/media_stream_interface.h +++ b/api/media_stream_interface.h @@ -263,6 +263,10 @@ class RTC_EXPORT AudioSourceInterface : public MediaSourceInterface { virtual void AddSink(AudioTrackSinkInterface* /* sink */) {} virtual void RemoveSink(AudioTrackSinkInterface* /* sink */) {} + // Indicates whether the source produces standalone PCM frames that bypass + // the legacy audio device module pipeline. + virtual bool is_standalone() const { return false; } + // Returns options for the AudioSource. // (for some of the settings this approach is broken, e.g. setting // audio network adaptation on the source is the wrong layer of abstraction). diff --git a/media/base/fake_media_engine.cc b/media/base/fake_media_engine.cc index 5485c02441..ff2e35c41f 100644 --- a/media/base/fake_media_engine.cc +++ b/media/base/fake_media_engine.cc @@ -285,6 +285,23 @@ bool FakeVoiceMediaSendChannel::SetAudioSend(uint32_t ssrc, ssrc, !enable)) { return false; } +#if defined(WEBRTC_IOS) + if (!source) { + if (enable) { + auto& stream = standalone_send_streams_[ssrc]; + if (!stream) { + webrtc::AudioSendStream::Config config(/*send_transport=*/nullptr); + config.rtp.ssrc = ssrc; + stream = + std::make_unique(ssrc, config); + } + } else { + standalone_send_streams_.erase(ssrc); + } + } else { + standalone_send_streams_.erase(ssrc); + } +#endif if (enable && options) { return SetOptions(*options); } @@ -318,6 +335,20 @@ bool FakeVoiceMediaSendChannel::GetOutputVolume(uint32_t ssrc, double* volume) { bool FakeVoiceMediaSendChannel::GetStats(VoiceMediaSendInfo* /* info */) { return false; } + +#if defined(WEBRTC_IOS) +webrtc::AudioSendStream* FakeVoiceMediaSendChannel::GetAudioSendStream( + uint32_t ssrc) { + auto it = standalone_send_streams_.find(ssrc); + return it != standalone_send_streams_.end() ? it->second.get() : nullptr; +} + +webrtc::FakeAudioSendStream* +FakeVoiceMediaSendChannel::GetFakeAudioSendStreamForTesting(uint32_t ssrc) { + auto it = standalone_send_streams_.find(ssrc); + return it != standalone_send_streams_.end() ? 
it->second.get() : nullptr; +} +#endif bool FakeVoiceMediaSendChannel::SetSendCodecs( const std::vector& codecs) { if (fail_set_send_codecs()) { diff --git a/media/base/fake_media_engine.h b/media/base/fake_media_engine.h index 132205817c..242423146c 100644 --- a/media/base/fake_media_engine.h +++ b/media/base/fake_media_engine.h @@ -72,6 +72,10 @@ #include "rtc_base/system/file_wrapper.h" #include "test/explicit_key_value_config.h" +#if defined(WEBRTC_IOS) +#include "media/engine/fake_webrtc_call.h" +#endif + namespace webrtc { class FakeMediaEngine; @@ -614,6 +618,11 @@ class FakeVoiceMediaSendChannel std::optional GetSendCodec() const override; bool GetStats(VoiceMediaSendInfo* stats) override; +#if defined(WEBRTC_IOS) + webrtc::AudioSendStream* GetAudioSendStream(uint32_t ssrc) override; + webrtc::FakeAudioSendStream* GetFakeAudioSendStreamForTesting( + uint32_t ssrc); +#endif private: class VoiceChannelAudioSink : public AudioSource::Sink { @@ -646,6 +655,10 @@ class FakeVoiceMediaSendChannel AudioOptions options_; std::map> local_sinks_; int max_bps_; +#if defined(WEBRTC_IOS) + std::map> + standalone_send_streams_; +#endif }; // A helper function to compare the FakeVoiceMediaChannel::DtmfInfo. diff --git a/media/base/media_channel.h b/media/base/media_channel.h index 6b7164fdf9..6fcfcf7b9b 100644 --- a/media/base/media_channel.h +++ b/media/base/media_channel.h @@ -63,6 +63,10 @@ #include "rtc_base/string_encode.h" #include "rtc_base/strings/string_builder.h" +#if defined(WEBRTC_IOS) +#include "call/audio_send_stream.h" +#endif + namespace webrtc { class VideoFrame; struct VideoFormat; @@ -892,6 +896,10 @@ class VoiceMediaSendChannelInterface : public MediaSendChannelInterface { bool enable, const webrtc::AudioOptions* options, webrtc::AudioSource* source) = 0; +#if defined(WEBRTC_IOS) + // Returns the underlying AudioSendStream associated with `ssrc`, if any. + virtual webrtc::AudioSendStream* GetAudioSendStream(uint32_t ssrc) = 0; +#endif // Returns if the telephone-event has been negotiated. virtual bool CanInsertDtmf() = 0; // Send a DTMF `event`. The DTMF out-of-band signal will be used. diff --git a/media/engine/fake_webrtc_call.h b/media/engine/fake_webrtc_call.h index aec1fc1626..4890598b7e 100644 --- a/media/engine/fake_webrtc_call.h +++ b/media/engine/fake_webrtc_call.h @@ -88,6 +88,7 @@ class FakeAudioSendStream final : public AudioSendStream { TelephoneEvent GetLatestTelephoneEvent() const; bool IsSending() const { return sending_; } bool muted() const { return muted_; } + int sent_audio_frames() const { return sent_audio_frames_; } private: // webrtc::AudioSendStream implementation. 
@@ -95,7 +96,9 @@ class FakeAudioSendStream final : public AudioSendStream { SetParametersCallback callback) override; void Start() override { sending_ = true; } void Stop() override { sending_ = false; } - void SendAudioData(std::unique_ptr /* audio_frame */) override {} + void SendAudioData(std::unique_ptr /* audio_frame */) override { + ++sent_audio_frames_; + } bool SendTelephoneEvent(int payload_type, int payload_frequency, int event, @@ -110,6 +113,7 @@ class FakeAudioSendStream final : public AudioSendStream { AudioSendStream::Stats stats_; bool sending_ = false; bool muted_ = false; + int sent_audio_frames_ = 0; }; class FakeAudioReceiveStream final : public AudioReceiveStreamInterface { diff --git a/media/engine/webrtc_voice_engine.cc b/media/engine/webrtc_voice_engine.cc index d1a22b5b03..21fb6fa3dc 100644 --- a/media/engine/webrtc_voice_engine.cc +++ b/media/engine/webrtc_voice_engine.cc @@ -872,6 +872,10 @@ class WebRtcVoiceSendChannel::WebRtcAudioSendStream : public AudioSource::Sink { call_->DestroyAudioSendStream(stream_); } +#if defined(WEBRTC_IOS) + webrtc::AudioSendStream* audio_send_stream() const { return stream_; } +#endif + void SetSendCodecSpec( const AudioSendStream::Config::SendCodecSpec& send_codec_spec) { UpdateSendCodecSpec(send_codec_spec); @@ -1859,6 +1863,17 @@ void WebRtcVoiceSendChannel::SetEncoderToPacketizerFrameTransformer( std::move(frame_transformer)); } +#if defined(WEBRTC_IOS) +webrtc::AudioSendStream* WebRtcVoiceSendChannel::GetAudioSendStream( + uint32_t ssrc) { + auto matching_stream = send_streams_.find(ssrc); + if (matching_stream == send_streams_.end()) { + return nullptr; + } + return matching_stream->second->audio_send_stream(); +} +#endif + RtpParameters WebRtcVoiceSendChannel::GetRtpSendParameters( uint32_t ssrc) const { RTC_DCHECK_RUN_ON(worker_thread_); diff --git a/media/engine/webrtc_voice_engine.h b/media/engine/webrtc_voice_engine.h index 422b003615..19ea2235de 100644 --- a/media/engine/webrtc_voice_engine.h +++ b/media/engine/webrtc_voice_engine.h @@ -251,9 +251,12 @@ // Sets a frame transformer between encoder and packetizer, to transform // encoded frames before sending them out the network. 
- void SetEncoderToPacketizerFrameTransformer( - uint32_t ssrc, - scoped_refptr frame_transformer) override; + void SetEncoderToPacketizerFrameTransformer( + uint32_t ssrc, + scoped_refptr frame_transformer) override; +#if defined(WEBRTC_IOS) + webrtc::AudioSendStream* GetAudioSendStream(uint32_t ssrc) override; +#endif bool SenderNackEnabled() const override { if (!send_codec_spec_) { @@ -302,7 +305,7 @@ const MediaConfig::Audio audio_config_; - class WebRtcAudioSendStream; + class WebRtcAudioSendStream; std::map send_streams_; std::vector send_rtp_extensions_; @@ -509,4 +512,4 @@ } // namespace cricket #endif // WEBRTC_ALLOW_DEPRECATED_NAMESPACES - #endif // MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_ \ No newline at end of file + #endif // MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_ diff --git a/pc/rtp_sender.cc b/pc/rtp_sender.cc index 31bbba0493..996847e523 100644 --- a/pc/rtp_sender.cc +++ b/pc/rtp_sender.cc @@ -22,6 +22,7 @@ #include #include "absl/algorithm/container.h" +#include "api/audio/audio_frame.h" #include "api/audio_options.h" #include "api/crypto/frame_encryptor_interface.h" #include "api/dtmf_sender_interface.h" @@ -50,6 +51,10 @@ #include "rtc_base/thread.h" #include "rtc_base/trace_event.h" +#if defined(WEBRTC_IOS) +#include "call/audio_send_stream.h" +#endif + namespace webrtc { namespace { @@ -671,6 +676,75 @@ void LocalAudioSinkAdapter::SetSink(AudioSource::Sink* sink) { sink_ = sink; } +#if defined(WEBRTC_IOS) +StandaloneAudioFrameForwarder::StandaloneAudioFrameForwarder() = default; + +void StandaloneAudioFrameForwarder::SetSendStream( + AudioSendStream* send_stream) { + MutexLock lock(&lock_); + send_stream_ = send_stream; + if (!send_stream_) { + timestamp_ = 0; + } +} + +void StandaloneAudioFrameForwarder::SetEnabled(bool enabled) { + MutexLock lock(&lock_); + enabled_ = enabled; +} + +void StandaloneAudioFrameForwarder::Reset() { + MutexLock lock(&lock_); + send_stream_ = nullptr; + enabled_ = false; + timestamp_ = 0; +} + +void StandaloneAudioFrameForwarder::OnData( + const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames) { + OnData(audio_data, bits_per_sample, sample_rate, number_of_channels, + number_of_frames, std::nullopt); +} + +void StandaloneAudioFrameForwarder::OnData( + const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + std::optional absolute_capture_timestamp_ms) { + RTC_DCHECK_EQ(bits_per_sample, 16); + AudioSendStream* send_stream = nullptr; + uint32_t timestamp = 0; + { + MutexLock lock(&lock_); + if (!enabled_ || !send_stream_) { + return; + } + send_stream = send_stream_; + timestamp = timestamp_; + timestamp_ += static_cast(number_of_frames); + } + + RTC_DCHECK(audio_data); + + auto frame = std::make_unique(); + frame->UpdateFrame(timestamp, static_cast(audio_data), + number_of_frames, sample_rate, + AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, + number_of_channels); + if (absolute_capture_timestamp_ms) { + frame->set_absolute_capture_timestamp_ms( + *absolute_capture_timestamp_ms); + } + send_stream->SendAudioData(std::move(frame)); +} +#endif + scoped_refptr AudioRtpSender::Create( const webrtc::Environment& env, Thread* worker_thread, @@ -744,12 +818,38 @@ void AudioRtpSender::OnChanged() { void AudioRtpSender::DetachTrack() { RTC_DCHECK(track_); + RTC_DCHECK_RUN_ON(signaling_thread_); +#if defined(WEBRTC_IOS) + if (using_standalone_source_) { + if (standalone_frame_forwarder_) { + 
audio_track()->RemoveSink(standalone_frame_forwarder_.get()); + standalone_frame_forwarder_->Reset(); + } + using_standalone_source_ = false; + return; + } +#endif audio_track()->RemoveSink(sink_adapter_.get()); } void AudioRtpSender::AttachTrack() { RTC_DCHECK(track_); + RTC_DCHECK_RUN_ON(signaling_thread_); cached_track_enabled_ = track_->enabled(); +#if defined(WEBRTC_IOS) + using_standalone_source_ = + audio_track()->GetSource() && audio_track()->GetSource()->is_standalone(); + if (using_standalone_source_) { + if (!standalone_frame_forwarder_) { + standalone_frame_forwarder_ = + std::make_unique(); + } + standalone_frame_forwarder_->Reset(); + standalone_frame_forwarder_->SetEnabled(cached_track_enabled_); + audio_track()->AddSink(standalone_frame_forwarder_.get()); + return; + } +#endif audio_track()->AddSink(sink_adapter_.get()); } @@ -800,6 +900,30 @@ void AudioRtpSender::SetSend() { // `track_->enabled()` hops to the signaling thread, so call it before we hop // to the worker thread or else it will deadlock. bool track_enabled = track_->enabled(); +#if defined(WEBRTC_IOS) + if (using_standalone_source_) { + bool success = worker_thread_->BlockingCall([&] { + return voice_media_channel()->SetAudioSend(ssrc_, track_enabled, &options, + nullptr); + }); + if (!success) { + RTC_LOG(LS_ERROR) << "SetAudioSend: ssrc is incorrect: " << ssrc_; + } + AudioSendStream* send_stream = worker_thread_->BlockingCall([&] { + return voice_media_channel()->GetAudioSendStream(ssrc_); + }); + if (!send_stream) { + RTC_LOG(LS_WARNING) + << "Standalone audio source missing AudioSendStream for ssrc " + << ssrc_; + } + if (standalone_frame_forwarder_) { + standalone_frame_forwarder_->SetSendStream(send_stream); + standalone_frame_forwarder_->SetEnabled(track_enabled); + } + return; + } +#endif bool success = worker_thread_->BlockingCall([&] { return voice_media_channel()->SetAudioSend(ssrc_, track_enabled, &options, sink_adapter_.get()); @@ -818,6 +942,22 @@ void AudioRtpSender::ClearSend() { return; } AudioOptions options; +#if defined(WEBRTC_IOS) + if (using_standalone_source_) { + if (standalone_frame_forwarder_) { + standalone_frame_forwarder_->SetEnabled(false); + standalone_frame_forwarder_->SetSendStream(nullptr); + } + bool success = worker_thread_->BlockingCall([&] { + return voice_media_channel()->SetAudioSend(ssrc_, false, &options, + nullptr); + }); + if (!success) { + RTC_LOG(LS_WARNING) << "ClearAudioSend: ssrc is incorrect: " << ssrc_; + } + return; + } +#endif bool success = worker_thread_->BlockingCall([&] { return voice_media_channel()->SetAudioSend(ssrc_, false, &options, nullptr); }); diff --git a/pc/rtp_sender.h b/pc/rtp_sender.h index ef7310ff5e..42968eac0f 100644 --- a/pc/rtp_sender.h +++ b/pc/rtp_sender.h @@ -47,6 +47,8 @@ namespace webrtc { +class AudioSendStream; + bool UnimplementedRtpParameterHasValue(const RtpParameters& parameters); // Internal interface used by PeerConnection. 
@@ -338,6 +340,35 @@ class LocalAudioSinkAdapter : public AudioTrackSinkInterface, int num_preferred_channels_ = -1; }; +#if defined(WEBRTC_IOS) +class StandaloneAudioFrameForwarder : public AudioTrackSinkInterface { + public: + StandaloneAudioFrameForwarder(); + void SetSendStream(AudioSendStream* send_stream); + void SetEnabled(bool enabled); + void Reset(); + + private: + void OnData(const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + std::optional absolute_capture_timestamp_ms) override; + void OnData(const void* audio_data, + int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames) override; + int NumPreferredChannels() const override { return -1; } + + Mutex lock_; + AudioSendStream* send_stream_ RTC_GUARDED_BY(lock_) = nullptr; + bool enabled_ RTC_GUARDED_BY(lock_) = false; + uint32_t timestamp_ RTC_GUARDED_BY(lock_) = 0; +}; +#endif + class AudioRtpSender : public DtmfProviderInterface, public RtpSenderBase { public: // Construct an RtpSender for audio with the given sender ID. @@ -405,6 +436,10 @@ class AudioRtpSender : public DtmfProviderInterface, public RtpSenderBase { // Used to pass the data callback from the `track_` to the other end of // webrtc::AudioSource. std::unique_ptr sink_adapter_; +#if defined(WEBRTC_IOS) + bool using_standalone_source_ RTC_GUARDED_BY(signaling_thread_) = false; + std::unique_ptr standalone_frame_forwarder_; +#endif }; class VideoRtpSender : public RtpSenderBase { diff --git a/pc/rtp_sender_receiver_unittest.cc b/pc/rtp_sender_receiver_unittest.cc index 4c5722bd64..157cd20108 100644 --- a/pc/rtp_sender_receiver_unittest.cc +++ b/pc/rtp_sender_receiver_unittest.cc @@ -10,6 +10,7 @@ #include +#include #include #include #include @@ -21,6 +22,7 @@ #include "absl/algorithm/container.h" #include "api/audio_codecs/audio_codec_pair_id.h" #include "api/audio_options.h" +#include "api/audio/audio_frame.h" #include "api/crypto/crypto_options.h" #include "api/crypto/frame_decryptor_interface.h" #include "api/crypto/frame_encryptor_interface.h" @@ -69,6 +71,10 @@ #include "test/run_loop.h" #include "test/wait_until.h" +#if defined(WEBRTC_IOS) +#include "sdk/objc/native/src/standalone_audio_track_source.h" +#endif + namespace { static const char kStreamId1[] = "local_stream_1"; @@ -186,7 +192,8 @@ class RtpSenderReceiverTest void CreateAudioRtpSender() { CreateAudioRtpSender(nullptr); } - void CreateAudioRtpSender(const scoped_refptr& source) { + void CreateAudioRtpSender( + const scoped_refptr& source) { audio_track_ = AudioTrack::Create(kAudioTrackId, source); EXPECT_TRUE(local_stream_->AddTrack(audio_track_)); std::unique_ptr set_streams_observer = @@ -336,6 +343,14 @@ class RtpSenderReceiverTest void VerifyVoiceChannelInput() { VerifyVoiceChannelInput(kAudioSsrc); } void VerifyVoiceChannelInput(uint32_t ssrc) { +#if defined(WEBRTC_IOS) + if (!voice_media_send_channel()->HasSource(ssrc)) { + EXPECT_NE(nullptr, + voice_media_send_channel()->GetAudioSendStream(ssrc)); + EXPECT_FALSE(voice_media_send_channel()->IsStreamMuted(ssrc)); + return; + } +#endif // Verify that the media channel has an audio source, and the stream isn't // muted. 
EXPECT_TRUE(voice_media_send_channel()->HasSource(ssrc)); @@ -584,6 +599,34 @@ TEST_F(RtpSenderReceiverTest, LocalAudioSourceOptionsApplied) { DestroyAudioRtpSender(); } +#if defined(WEBRTC_IOS) +TEST_F(RtpSenderReceiverTest, StandaloneAudioSourceForwardsFrames) { + auto standalone_source = + rtc::make_ref_counted(); + CreateAudioRtpSender(standalone_source); + + auto* send_stream = + voice_media_send_channel()->GetFakeAudioSendStreamForTesting(kAudioSsrc); + ASSERT_NE(nullptr, send_stream); + EXPECT_EQ(0, send_stream->sent_audio_frames()); + + standalone_source->Start(); + constexpr int kSampleRateHz = 48000; + constexpr size_t kNumChannels = 1; + constexpr size_t kFramesPerBuffer = kSampleRateHz / 100; + std::array samples{}; + AudioFrame frame; + frame.UpdateFrame(0, samples.data(), kFramesPerBuffer, kSampleRateHz, + AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, + kNumChannels); + standalone_source->PushAudioFrame(frame); + + EXPECT_EQ(1, send_stream->sent_audio_frames()); + + DestroyAudioRtpSender(); +} +#endif + // Test that the stream is muted when the track is disabled, and unmuted when // the track is enabled. TEST_F(RtpSenderReceiverTest, LocalAudioTrackDisable) { diff --git a/sdk/objc/native/src/standalone_audio_track_source.h b/sdk/objc/native/src/standalone_audio_track_source.h index d5277c8ade..af4b89b9a5 100644 --- a/sdk/objc/native/src/standalone_audio_track_source.h +++ b/sdk/objc/native/src/standalone_audio_track_source.h @@ -42,6 +42,7 @@ class StandaloneAudioTrackSource : public Notifier { // MediaSourceInterface implementation. SourceState state() const override; bool remote() const override { return false; } + bool is_standalone() const override { return true; } // AudioSourceInterface implementation. void AddSink(AudioTrackSinkInterface* sink) override; From 691c63000c36996f7643cf3e807d339585a5b70a Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Sat, 20 Sep 2025 15:47:41 +0300 Subject: [PATCH 10/15] Expose ObjC API --- sdk/BUILD.gn | 3 + sdk/objc/api/peerconnection/RTCAudioSource.h | 11 +- sdk/objc/api/peerconnection/RTCAudioSource.mm | 7 + sdk/objc/api/peerconnection/RTCAudioTrack.mm | 25 ++- .../api/peerconnection/RTCPCMAudioCapturer.h | 39 ++++ .../api/peerconnection/RTCPCMAudioCapturer.mm | 201 ++++++++++++++++++ .../peerconnection/RTCPeerConnectionFactory.h | 5 + .../RTCPeerConnectionFactory.mm | 7 + 8 files changed, 296 insertions(+), 2 deletions(-) create mode 100644 sdk/objc/api/peerconnection/RTCPCMAudioCapturer.h create mode 100644 sdk/objc/api/peerconnection/RTCPCMAudioCapturer.mm diff --git a/sdk/BUILD.gn b/sdk/BUILD.gn index c131fb26e7..1cafdc145e 100644 --- a/sdk/BUILD.gn +++ b/sdk/BUILD.gn @@ -1050,6 +1050,8 @@ if (is_ios || is_mac) { "objc/api/peerconnection/RTCStandaloneAudioSource+Private.h", "objc/api/peerconnection/RTCStandaloneAudioSource.h", "objc/api/peerconnection/RTCStandaloneAudioSource.mm", + "objc/api/peerconnection/RTCPCMAudioCapturer.h", + "objc/api/peerconnection/RTCPCMAudioCapturer.mm", "objc/api/peerconnection/RTCAudioTrack+Private.h", "objc/api/peerconnection/RTCAudioTrack.h", "objc/api/peerconnection/RTCAudioTrack.mm", @@ -1476,6 +1478,7 @@ if (is_ios || is_mac) { "objc/api/peerconnection/RTCAudioFrame.h", "objc/api/peerconnection/RTCAudioSource.h", "objc/api/peerconnection/RTCStandaloneAudioSource.h", + "objc/api/peerconnection/RTCPCMAudioCapturer.h", "objc/api/peerconnection/RTCAudioTrack.h", "objc/api/peerconnection/RTCConfiguration.h", "objc/api/peerconnection/RTCDataChannel.h", diff --git 
a/sdk/objc/api/peerconnection/RTCAudioSource.h b/sdk/objc/api/peerconnection/RTCAudioSource.h index b62e362fe5..0f3681e934 100644 --- a/sdk/objc/api/peerconnection/RTCAudioSource.h +++ b/sdk/objc/api/peerconnection/RTCAudioSource.h @@ -15,8 +15,17 @@ NS_ASSUME_NONNULL_BEGIN +@class RTC_OBJC_TYPE(RTCAudioFrame); + +RTC_OBJC_EXPORT +@protocol RTC_OBJC_TYPE(RTCAudioCapturerDelegate) + +- (void)pushAudioFrame:(RTC_OBJC_TYPE(RTCAudioFrame) *)frame; + +@end + RTC_OBJC_EXPORT -@interface RTC_OBJC_TYPE (RTCAudioSource) : RTC_OBJC_TYPE(RTCMediaSource) +@interface RTC_OBJC_TYPE (RTCAudioSource) : RTC_OBJC_TYPE(RTCMediaSource) - (instancetype)init NS_UNAVAILABLE; diff --git a/sdk/objc/api/peerconnection/RTCAudioSource.mm b/sdk/objc/api/peerconnection/RTCAudioSource.mm index 458d24435e..4a0d4f0980 100644 --- a/sdk/objc/api/peerconnection/RTCAudioSource.mm +++ b/sdk/objc/api/peerconnection/RTCAudioSource.mm @@ -10,6 +10,9 @@ #import "RTCAudioSource+Private.h" +#import "RTCAudioFrame.h" +#import "RTCLogging.h" + #include "rtc_base/checks.h" @implementation RTC_OBJC_TYPE (RTCAudioSource) { @@ -55,4 +58,8 @@ - (void)setVolume:(double)volume { _nativeAudioSource->SetVolume(volume); } +- (void)pushAudioFrame:(RTC_OBJC_TYPE(RTCAudioFrame) *)frame { + RTCLogWarning(@"pushAudioFrame is only supported by RTCStandaloneAudioSource."); +} + @end diff --git a/sdk/objc/api/peerconnection/RTCAudioTrack.mm b/sdk/objc/api/peerconnection/RTCAudioTrack.mm index 30e4596478..704e1e71bf 100644 --- a/sdk/objc/api/peerconnection/RTCAudioTrack.mm +++ b/sdk/objc/api/peerconnection/RTCAudioTrack.mm @@ -15,6 +15,7 @@ #import "RTCAudioRenderer.h" #import "RTCAudioSource+Private.h" +#import "RTCStandaloneAudioSource.h" #import "RTCMediaStreamTrack+Private.h" #import "RTCPeerConnectionFactory+Private.h" #import "api/RTCAudioRendererAdapter+Private.h" @@ -65,6 +66,28 @@ - (instancetype)initWithFactory: return self; } +- (void)setIsEnabled:(BOOL)isEnabled { + BOOL wasEnabled = self.isEnabled; + [super setIsEnabled:isEnabled]; + if (wasEnabled == isEnabled) { + return; + } + + RTC_OBJC_TYPE(RTCAudioSource) *source = _source; + if (!source) { + source = [self source]; + } + if ([source isKindOfClass:[RTC_OBJC_TYPE(RTCStandaloneAudioSource) class]]) { + RTC_OBJC_TYPE(RTCStandaloneAudioSource) *standaloneSource = + (RTC_OBJC_TYPE(RTCStandaloneAudioSource) *)source; + if (isEnabled) { + [standaloneSource start]; + } else { + [standaloneSource stop]; + } + } +} + - (void)dealloc { [self removeAllRenderers]; } @@ -147,4 +170,4 @@ - (void)removeAllRenderers { static_cast(self.nativeTrack.get())); } -@end \ No newline at end of file +@end diff --git a/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.h b/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.h new file mode 100644 index 0000000000..b03e657fa2 --- /dev/null +++ b/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.h @@ -0,0 +1,39 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#import +#import + +#import "sdk/objc/base/RTCMacros.h" + +NS_ASSUME_NONNULL_BEGIN + +@protocol RTC_OBJC_TYPE(RTCAudioCapturerDelegate); + +RTC_OBJC_EXPORT +@interface RTC_OBJC_TYPE(RTCPCMAudioCapturer) : NSObject + +- (instancetype)init NS_UNAVAILABLE; +- (instancetype)initWithDelegate:(id)delegate + NS_DESIGNATED_INITIALIZER; + +/** + * Ingests a CMSampleBuffer containing linear PCM audio, normalizes it into 10 ms + * chunks, and forwards the data to the delegate. + * Unsupported formats are dropped and logged. + */ +- (void)captureBuffer:(CMSampleBufferRef)buffer; + +@property(nonatomic, readonly) + id delegate; + +@end + +NS_ASSUME_NONNULL_END diff --git a/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.mm b/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.mm new file mode 100644 index 0000000000..f895941323 --- /dev/null +++ b/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.mm @@ -0,0 +1,201 @@ +/* + * Copyright 2024 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#import "RTCPCMAudioCapturer.h" + +#import +#import + +#include + +#import "RTCLogging.h" +#import "RTCAudioFrame.h" +#import "RTCAudioSource.h" + +namespace { +constexpr size_t kBitsPerSample = 16; +constexpr double kChunkDurationMs = 10.0; +} + +@interface RTC_OBJC_TYPE(RTCPCMAudioCapturer) () { + id _delegate; + NSMutableData *_pendingData; + int _sampleRateHz; + NSUInteger _channels; + uint32_t _timestamp; + BOOL _hasPendingTimestampMs; + double _pendingTimestampMs; +} +@end + +@implementation RTC_OBJC_TYPE(RTCPCMAudioCapturer) + +- (instancetype)initWithDelegate:(id)delegate { + NSParameterAssert(delegate); + self = [super init]; + if (self) { + _delegate = delegate; + _pendingData = [NSMutableData data]; + _sampleRateHz = 0; + _channels = 0; + _timestamp = 0; + _hasPendingTimestampMs = NO; + _pendingTimestampMs = 0.0; + } + return self; +} + +- (id)delegate { + return _delegate; +} + +- (void)captureBuffer:(CMSampleBufferRef)buffer { + if (!buffer) { + return; + } + + size_t sampleCount = CMSampleBufferGetNumSamples(buffer); + if (sampleCount == 0) { + return; + } + + CMAudioFormatDescriptionRef format = CMSampleBufferGetFormatDescription(buffer); + if (!format) { + RTCLogError(@"CMSampleBuffer without format description"); + return; + } + + const AudioStreamBasicDescription *asbd = + CMAudioFormatDescriptionGetStreamBasicDescription(format); + if (!asbd) { + RTCLogError(@"Unable to obtain stream description from CMSampleBuffer"); + return; + } + + if (asbd->mFormatID != kAudioFormatLinearPCM) { + RTCLogError(@"Unsupported audio format: %u", static_cast(asbd->mFormatID)); + return; + } + + if (!(asbd->mFormatFlags & kAudioFormatFlagIsSignedInteger) || + (asbd->mFormatFlags & kAudioFormatFlagIsFloat) || + (asbd->mFormatFlags & kAudioFormatFlagIsNonInterleaved) || + (asbd->mFormatFlags & kAudioFormatFlagIsBigEndian) || + asbd->mBitsPerChannel != kBitsPerSample) { + RTCLogError(@"CMSampleBuffer must contain interleaved 16-bit PCM audio"); + return; + } + + int sampleRate = static_cast(asbd->mSampleRate); + if (sampleRate <= 0) { + RTCLogError(@"Invalid sample rate %d", sampleRate); + return; + } + + NSUInteger channels = asbd->mChannelsPerFrame; + if (channels == 0) { + 
RTCLogError(@"Audio buffer reports zero channels"); + return; + } + + size_t framesPerChunk = static_cast(sampleRate / 100); + if (framesPerChunk == 0 || sampleRate % 100 != 0) { + RTCLogError(@"Sample rate %d unsupported for 10ms framing", sampleRate); + return; + } + + BOOL formatChanged = (_sampleRateHz != sampleRate) || (_channels != channels); + if (formatChanged) { + _sampleRateHz = sampleRate; + _channels = channels; + _timestamp = 0; + [_pendingData setLength:0]; + _hasPendingTimestampMs = NO; + } + + size_t bytesPerFrame = channels * sizeof(int16_t); + size_t chunkBytes = framesPerChunk * bytesPerFrame; + + size_t bufferListSize = 0; + OSStatus status = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer( + buffer, &bufferListSize, nullptr, 0, nullptr, nullptr, 0, nullptr); + if (status != noErr) { + RTCLogError(@"Failed to query audio buffer list size (status=%d)", status); + return; + } + + AudioBufferList *audioBufferList = + static_cast(malloc(bufferListSize)); + if (!audioBufferList) { + RTCLogError(@"Failed to allocate audio buffer list"); + return; + } + + CMBlockBufferRef blockBuffer = nullptr; + status = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer( + buffer, &bufferListSize, audioBufferList, bufferListSize, nullptr, nullptr, + 0, &blockBuffer); + if (status != noErr) { + RTCLogError(@"Failed to extract audio buffer list (status=%d)", status); + free(audioBufferList); + return; + } + + for (UInt32 i = 0; i < audioBufferList->mNumberBuffers; ++i) { + const AudioBuffer &audioBuffer = audioBufferList->mBuffers[i]; + if (audioBuffer.mData && audioBuffer.mDataByteSize > 0) { + [_pendingData appendBytes:audioBuffer.mData length:audioBuffer.mDataByteSize]; + } + } + + CFRelease(blockBuffer); + free(audioBufferList); + + CMTime presentationTime = CMSampleBufferGetPresentationTimeStamp(buffer); + BOOL hasPts = CMTIME_IS_NUMERIC(presentationTime); + double nextChunkTimestampMs = NAN; + if (_hasPendingTimestampMs) { + nextChunkTimestampMs = _pendingTimestampMs; + _hasPendingTimestampMs = NO; + } else if (hasPts) { + nextChunkTimestampMs = CMTimeGetSeconds(presentationTime) * 1000.0; + } + + while (_pendingData.length >= chunkBytes) { + NSData *chunk = [_pendingData subdataWithRange:NSMakeRange(0, chunkBytes)]; + [_pendingData replaceBytesInRange:NSMakeRange(0, chunkBytes) + withBytes:nullptr + length:0]; + + NSNumber *absoluteCaptureTimestamp = nil; + if (!std::isnan(nextChunkTimestampMs)) { + absoluteCaptureTimestamp = + @(llround(nextChunkTimestampMs)); + nextChunkTimestampMs += kChunkDurationMs; + } + + RTC_OBJC_TYPE(RTCAudioFrame) *frame = + [[RTC_OBJC_TYPE(RTCAudioFrame) alloc] initWithData:chunk + sampleRateHz:_sampleRateHz + channels:_channels + framesPerChannel:framesPerChunk + timestamp:_timestamp + absoluteCaptureTimestampMs:absoluteCaptureTimestamp]; + [_delegate pushAudioFrame:frame]; + _timestamp += framesPerChunk; + } + + if (_pendingData.length > 0 && !std::isnan(nextChunkTimestampMs)) { + _pendingTimestampMs = nextChunkTimestampMs; + _hasPendingTimestampMs = YES; + } +} + +@end diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h index 898a43a5c9..9370937fad 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h @@ -17,6 +17,7 @@ NS_ASSUME_NONNULL_BEGIN @class RTC_OBJC_TYPE(RTCRtpCapabilities); @class RTC_OBJC_TYPE(RTCAudioSource); +@class RTC_OBJC_TYPE(RTCStandaloneAudioSource); @class 
RTC_OBJC_TYPE(RTCAudioTrack); @class RTC_OBJC_TYPE(RTCConfiguration); @class RTC_OBJC_TYPE(RTCMediaConstraints); @@ -92,6 +93,10 @@ RTC_OBJC_EXPORT (nullable RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints standalone:(BOOL)standalone; +/** Convenience helper that returns a standalone audio source. */ +- (RTC_OBJC_TYPE(RTCStandaloneAudioSource) *)standaloneAudioSourceWithConstraints: + (nullable RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints; + /** Initialize an RTCAudioTrack with an id. Convenience ctor to use an audio source * with no constraints. */ diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm index 9ba7e68a0e..acc6d12020 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm @@ -404,6 +404,13 @@ - (instancetype)initWithNativeAudioEncoderFactory: return [self audioTrackWithSource:audioSource trackId:trackId]; } +- (RTC_OBJC_TYPE(RTCStandaloneAudioSource) *) + standaloneAudioSourceWithConstraints: + (nullable RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints { + return static_cast( + [self audioSourceWithConstraints:constraints standalone:YES]); +} + - (RTC_OBJC_TYPE(RTCAudioTrack) *)audioTrackWithSource:(RTC_OBJC_TYPE(RTCAudioSource) *)source trackId:(NSString *)trackId { return [[RTC_OBJC_TYPE(RTCAudioTrack) alloc] initWithFactory:self source:source trackId:trackId]; From 88e2a68555d1c70a0477517ae3791ec388fcdc9e Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Mon, 22 Sep 2025 15:57:53 +0300 Subject: [PATCH 11/15] Fix threading issue --- .../peerconnection/RTCStandaloneAudioSource.mm | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm index b0790ea979..71f81b1503 100644 --- a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm +++ b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm @@ -29,6 +29,7 @@ @implementation RTC_OBJC_TYPE(RTCStandaloneAudioSource) { rtc::scoped_refptr _nativeStandaloneSource; + rtc::Thread *_signalingThread; } @synthesize nativeStandaloneSource = _nativeStandaloneSource; @@ -41,6 +42,7 @@ - (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)facto self = [super initWithFactory:factory nativeAudioSource:nativeStandaloneSource]; if (self) { _nativeStandaloneSource = std::move(nativeStandaloneSource); + _signalingThread = factory.signalingThread; } return self; } @@ -52,10 +54,24 @@ - (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)facto } - (void)start { + if (_signalingThread && !_signalingThread->IsCurrent()) { + _signalingThread->BlockingCall([self] { + [self start]; + }); + return; + } + _nativeStandaloneSource->Start(); } - (void)stop { + if (_signalingThread && !_signalingThread->IsCurrent()) { + _signalingThread->BlockingCall([self] { + [self stop]; + }); + return; + } + _nativeStandaloneSource->Stop(); } From 30655766fdd00fd3a408771b943f004668afa3ff Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Mon, 22 Sep 2025 17:34:11 +0300 Subject: [PATCH 12/15] Fix racing and mic & standalone tracks getting merged --- audio/audio_send_stream.cc | 10 ++++-- call/audio_send_stream.cc | 2 ++ call/audio_send_stream.h | 5 +++ media/base/fake_media_engine.cc | 23 ++++++++++++++ media/base/fake_media_engine.h | 1 + media/base/media_channel.h | 3 ++ media/engine/fake_webrtc_call.h | 3 ++ media/engine/webrtc_voice_engine.cc | 
27 ++++++++++++++++ media/engine/webrtc_voice_engine.h | 1 + pc/rtp_sender.cc | 48 ++++++++++++++++++++--------- 10 files changed, 106 insertions(+), 17 deletions(-) diff --git a/audio/audio_send_stream.cc b/audio/audio_send_stream.cc index 275967e9ba..1adacdbc05 100644 --- a/audio/audio_send_stream.cc +++ b/audio/audio_send_stream.cc @@ -348,8 +348,10 @@ void AudioSendStream::Start() { } channel_send_->StartSend(); sending_ = true; - audio_state()->AddSendingStream(this, encoder_sample_rate_hz_, - encoder_num_channels_); + if (!config_.bypass_audio_transport) { + audio_state()->AddSendingStream(this, encoder_sample_rate_hz_, + encoder_num_channels_); + } } void AudioSendStream::Stop() { @@ -361,7 +363,9 @@ void AudioSendStream::Stop() { RemoveBitrateObserver(); channel_send_->StopSend(); sending_ = false; - audio_state()->RemoveSendingStream(this); + if (!config_.bypass_audio_transport) { + audio_state()->RemoveSendingStream(this); + } } void AudioSendStream::SendAudioData(std::unique_ptr audio_frame) { diff --git a/call/audio_send_stream.cc b/call/audio_send_stream.cc index 612d27a60c..5aea37823a 100644 --- a/call/audio_send_stream.cc +++ b/call/audio_send_stream.cc @@ -41,6 +41,8 @@ std::string AudioSendStream::Config::ToString() const { ss << ", has_dscp: " << (has_dscp ? "true" : "false"); ss << ", send_codec_spec: " << (send_codec_spec ? send_codec_spec->ToString() : ""); + ss << ", bypass_audio_transport: " + << (bypass_audio_transport ? "true" : "false"); ss << "}"; return ss.Release(); } diff --git a/call/audio_send_stream.h b/call/audio_send_stream.h index 11f5ede921..392fd21286 100644 --- a/call/audio_send_stream.h +++ b/call/audio_send_stream.h @@ -170,6 +170,11 @@ class AudioSendStream : public AudioSender { // An optional frame transformer used by insertable streams to transform // encoded frames. scoped_refptr frame_transformer; + + // When true, the stream skips registration with the shared + // AudioTransportImpl microphone pipeline and expects audio frames to be + // supplied manually via SendAudioData(). + bool bypass_audio_transport = false; }; virtual ~AudioSendStream() = default; diff --git a/media/base/fake_media_engine.cc b/media/base/fake_media_engine.cc index ff2e35c41f..e8d752c4f4 100644 --- a/media/base/fake_media_engine.cc +++ b/media/base/fake_media_engine.cc @@ -348,6 +348,29 @@ FakeVoiceMediaSendChannel::GetFakeAudioSendStreamForTesting(uint32_t ssrc) { auto it = standalone_send_streams_.find(ssrc); return it != standalone_send_streams_.end() ? 
it->second.get() : nullptr; } + +bool FakeVoiceMediaSendChannel::SetStandaloneAudioMode(uint32_t ssrc, + bool enabled) { + auto it = standalone_send_streams_.find(ssrc); + if (!enabled) { + if (it != standalone_send_streams_.end()) { + it->second->set_bypass_audio_transport(false); + } + return true; + } + + if (it == standalone_send_streams_.end()) { + webrtc::AudioSendStream::Config config(/*send_transport=*/nullptr); + config.rtp.ssrc = ssrc; + auto stream = std::make_unique(ssrc, config); + stream->set_bypass_audio_transport(true); + standalone_send_streams_.emplace(ssrc, std::move(stream)); + return true; + } + + it->second->set_bypass_audio_transport(true); + return true; +} #endif bool FakeVoiceMediaSendChannel::SetSendCodecs( const std::vector& codecs) { diff --git a/media/base/fake_media_engine.h b/media/base/fake_media_engine.h index 242423146c..9f5b1b9901 100644 --- a/media/base/fake_media_engine.h +++ b/media/base/fake_media_engine.h @@ -622,6 +622,7 @@ class FakeVoiceMediaSendChannel webrtc::AudioSendStream* GetAudioSendStream(uint32_t ssrc) override; webrtc::FakeAudioSendStream* GetFakeAudioSendStreamForTesting( uint32_t ssrc); + bool SetStandaloneAudioMode(uint32_t ssrc, bool enabled) override; #endif private: diff --git a/media/base/media_channel.h b/media/base/media_channel.h index 6fcfcf7b9b..8230970c2f 100644 --- a/media/base/media_channel.h +++ b/media/base/media_channel.h @@ -899,6 +899,9 @@ class VoiceMediaSendChannelInterface : public MediaSendChannelInterface { #if defined(WEBRTC_IOS) // Returns the underlying AudioSendStream associated with `ssrc`, if any. virtual webrtc::AudioSendStream* GetAudioSendStream(uint32_t ssrc) = 0; + // Enables or disables bypass of the shared AudioTransportImpl pipeline for + // the send stream identified by `ssrc`. + virtual bool SetStandaloneAudioMode(uint32_t ssrc, bool enabled) = 0; #endif // Returns if the telephone-event has been negotiated. virtual bool CanInsertDtmf() = 0; diff --git a/media/engine/fake_webrtc_call.h b/media/engine/fake_webrtc_call.h index 4890598b7e..6d39ce3941 100644 --- a/media/engine/fake_webrtc_call.h +++ b/media/engine/fake_webrtc_call.h @@ -89,6 +89,9 @@ class FakeAudioSendStream final : public AudioSendStream { bool IsSending() const { return sending_; } bool muted() const { return muted_; } int sent_audio_frames() const { return sent_audio_frames_; } + void set_bypass_audio_transport(bool bypass) { + config_.bypass_audio_transport = bypass; + } private: // webrtc::AudioSendStream implementation. 
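
The hunks above add the `bypass_audio_transport` switch and the fake-channel plumbing; the hunks below wire it into WebRtcVoiceSendChannel and the RTP sender. As a rough illustration of the contract the flag creates, the sketch below drives a bypassed send stream directly with one 10 ms PCM frame. It is only a sketch: the `SendOneBypassedFrame` helper, the Call/Transport wiring, the SSRC, and the 48 kHz mono buffer are illustrative assumptions, and codec/encoder setup is omitted.

    #include <memory>

    #include "api/audio/audio_frame.h"
    #include "api/call/transport.h"
    #include "call/audio_send_stream.h"
    #include "call/call.h"

    // Hypothetical helper: pushes one 10 ms block of 48 kHz mono int16 PCM
    // into a send stream that bypasses the shared microphone pipeline.
    void SendOneBypassedFrame(webrtc::Call* call, webrtc::Transport* transport,
                              const int16_t* pcm_10ms) {
      webrtc::AudioSendStream::Config config(transport);
      config.rtp.ssrc = 12345;               // illustrative SSRC
      config.bypass_audio_transport = true;  // stay off AudioTransportImpl
      // Codec/encoder configuration omitted for brevity.
      webrtc::AudioSendStream* stream = call->CreateAudioSendStream(config);
      stream->Start();  // with the flag set, AddSendingStream() is skipped

      auto frame = std::make_unique<webrtc::AudioFrame>();
      frame->UpdateFrame(/*timestamp=*/0, pcm_10ms,
                         /*samples_per_channel=*/480, /*sample_rate_hz=*/48000,
                         webrtc::AudioFrame::kNormalSpeech,
                         webrtc::AudioFrame::kVadUnknown, /*num_channels=*/1);
      stream->SendAudioData(std::move(frame));  // manual feed, no mic capture
    }

Turning the flag off again goes through Reconfigure() around a Stop()/Start() pair (see SetBypassAudioTransport() below), which puts the stream back on the shared capture path.
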
diff --git a/media/engine/webrtc_voice_engine.cc b/media/engine/webrtc_voice_engine.cc index 21fb6fa3dc..c1de260d2b 100644 --- a/media/engine/webrtc_voice_engine.cc +++ b/media/engine/webrtc_voice_engine.cc @@ -874,6 +874,22 @@ class WebRtcVoiceSendChannel::WebRtcAudioSendStream : public AudioSource::Sink { #if defined(WEBRTC_IOS) webrtc::AudioSendStream* audio_send_stream() const { return stream_; } + void SetBypassAudioTransport(bool bypass) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + if (config_.bypass_audio_transport == bypass) { + return; + } + const bool was_running = send_ && rtp_parameters_.encodings[0].active; + if (was_running) { + stream_->Stop(); + } + config_.bypass_audio_transport = bypass; + stream_->Reconfigure(config_, nullptr); + if (was_running) { + stream_->Start(); + stream_->SetMuted(muted_); + } + } #endif void SetSendCodecSpec( @@ -1872,6 +1888,17 @@ webrtc::AudioSendStream* WebRtcVoiceSendChannel::GetAudioSendStream( } return matching_stream->second->audio_send_stream(); } + +bool WebRtcVoiceSendChannel::SetStandaloneAudioMode(uint32_t ssrc, + bool enabled) { + RTC_DCHECK_RUN_ON(worker_thread_); + auto matching_stream = send_streams_.find(ssrc); + if (matching_stream == send_streams_.end()) { + return false; + } + matching_stream->second->SetBypassAudioTransport(enabled); + return true; +} #endif RtpParameters WebRtcVoiceSendChannel::GetRtpSendParameters( diff --git a/media/engine/webrtc_voice_engine.h b/media/engine/webrtc_voice_engine.h index 19ea2235de..d678d78aba 100644 --- a/media/engine/webrtc_voice_engine.h +++ b/media/engine/webrtc_voice_engine.h @@ -256,6 +256,7 @@ scoped_refptr frame_transformer) override; #if defined(WEBRTC_IOS) webrtc::AudioSendStream* GetAudioSendStream(uint32_t ssrc) override; + bool SetStandaloneAudioMode(uint32_t ssrc, bool enabled) override; #endif bool SenderNackEnabled() const override { diff --git a/pc/rtp_sender.cc b/pc/rtp_sender.cc index 996847e523..4324025ad2 100644 --- a/pc/rtp_sender.cc +++ b/pc/rtp_sender.cc @@ -718,22 +718,15 @@ void StandaloneAudioFrameForwarder::OnData( size_t number_of_frames, std::optional absolute_capture_timestamp_ms) { RTC_DCHECK_EQ(bits_per_sample, 16); - AudioSendStream* send_stream = nullptr; - uint32_t timestamp = 0; - { - MutexLock lock(&lock_); - if (!enabled_ || !send_stream_) { - return; - } - send_stream = send_stream_; - timestamp = timestamp_; - timestamp_ += static_cast(number_of_frames); - } - RTC_DCHECK(audio_data); auto frame = std::make_unique(); - frame->UpdateFrame(timestamp, static_cast(audio_data), + MutexLock lock(&lock_); + if (!enabled_ || !send_stream_) { + return; + } + + frame->UpdateFrame(timestamp_, static_cast(audio_data), number_of_frames, sample_rate, AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, number_of_channels); @@ -741,7 +734,10 @@ void StandaloneAudioFrameForwarder::OnData( frame->set_absolute_capture_timestamp_ms( *absolute_capture_timestamp_ms); } - send_stream->SendAudioData(std::move(frame)); + timestamp_ += static_cast(number_of_frames); + // Keep the lock while forwarding to guarantee serialized access into the + // AudioSendStream and satisfy its race checker. 
+ send_stream_->SendAudioData(std::move(frame)); } #endif @@ -902,6 +898,13 @@ void AudioRtpSender::SetSend() { bool track_enabled = track_->enabled(); #if defined(WEBRTC_IOS) if (using_standalone_source_) { + bool mode_set = worker_thread_->BlockingCall([&] { + return voice_media_channel()->SetStandaloneAudioMode(ssrc_, true); + }); + if (!mode_set) { + RTC_LOG(LS_ERROR) << "Failed to enable standalone audio mode for ssrc " + << ssrc_; + } bool success = worker_thread_->BlockingCall([&] { return voice_media_channel()->SetAudioSend(ssrc_, track_enabled, &options, nullptr); @@ -923,6 +926,15 @@ void AudioRtpSender::SetSend() { } return; } +#endif +#if defined(WEBRTC_IOS) + bool mode_reset = worker_thread_->BlockingCall([&] { + return voice_media_channel()->SetStandaloneAudioMode(ssrc_, false); + }); + if (!mode_reset) { + RTC_LOG(LS_WARNING) + << "Failed to disable standalone audio mode for ssrc " << ssrc_; + } #endif bool success = worker_thread_->BlockingCall([&] { return voice_media_channel()->SetAudioSend(ssrc_, track_enabled, &options, @@ -948,6 +960,14 @@ void AudioRtpSender::ClearSend() { standalone_frame_forwarder_->SetEnabled(false); standalone_frame_forwarder_->SetSendStream(nullptr); } + bool mode_set = worker_thread_->BlockingCall([&] { + return voice_media_channel()->SetStandaloneAudioMode(ssrc_, true); + }); + if (!mode_set) { + RTC_LOG(LS_WARNING) + << "Failed to keep standalone audio mode enabled for ssrc " + << ssrc_; + } bool success = worker_thread_->BlockingCall([&] { return voice_media_channel()->SetAudioSend(ssrc_, false, &options, nullptr); From 6ae3a23757880890fe66d8376ab4fd476508164f Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Tue, 23 Sep 2025 16:43:34 +0300 Subject: [PATCH 13/15] Add logs --- audio/channel_send.cc | 82 ++++++++++ .../api/peerconnection/RTCPCMAudioCapturer.mm | 143 ++++++++++++++++++ .../RTCStandaloneAudioSource.mm | 30 ++++ .../src/standalone_audio_track_source.cc | 33 ++++ 4 files changed, 288 insertions(+) diff --git a/audio/channel_send.cc b/audio/channel_send.cc index a71322ab30..845fd9552a 100644 --- a/audio/channel_send.cc +++ b/audio/channel_send.cc @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -67,6 +68,7 @@ #include "rtc_base/synchronization/mutex.h" #include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread_annotations.h" +#include "rtc_base/third_party/base64/base64.h" #include "rtc_base/trace_event.h" #include "system_wrappers/include/metrics.h" @@ -78,6 +80,40 @@ namespace { constexpr TimeDelta kMaxRetransmissionWindow = TimeDelta::Seconds(1); constexpr TimeDelta kMinRetransmissionWindow = TimeDelta::Millis(30); +struct PcmFrameStats { + double rms; + int32_t max_abs; + bool all_zero; +}; + +PcmFrameStats ComputePcmFrameStats(const int16_t* samples, size_t sample_count) { + PcmFrameStats stats{0.0, 0, true}; + if (!samples || sample_count == 0) { + return stats; + } + + long double sum_squares = 0; + int32_t peak = 0; + bool all_zero = true; + + for (size_t i = 0; i < sample_count; ++i) { + const int32_t sample = samples[i]; + if (sample != 0) { + all_zero = false; + } + const int32_t abs_sample = sample < 0 ? 
-sample : sample; + if (abs_sample > peak) { + peak = abs_sample; + } + sum_squares += static_cast(sample) * sample; + } + + stats.rms = std::sqrt(static_cast(sum_squares / sample_count)); + stats.max_abs = peak; + stats.all_zero = all_zero; + return stats; +} + class RtpPacketSenderProxy; class TransportSequenceNumberProxy; @@ -475,6 +511,20 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, if (include_audio_level_indication_.load() && audio_level_dbov) { frame.audio_level_dbov = *audio_level_dbov; } + + const std::string audio_level_str = audio_level_dbov + ? std::to_string(static_cast(*audio_level_dbov)) + : std::string("none"); + RTC_LOG(LS_VERBOSE) + << "ChannelSend::SendRtpAudio frameType=" << static_cast(frameType) + << " payloadType=" << static_cast(payloadType) + << " rtp_timestamp=" << frame.rtp_timestamp + << " payload_bytes=" << payload.size() + << " audio_level_dbov=" << audio_level_str + << " abs_capture_ms=" + << (absolute_capture_timestamp_ms > 0 + ? std::to_string(absolute_capture_timestamp_ms) + : std::string("none")); if (!rtp_sender_audio_->SendAudio(frame)) { RTC_DLOG(LS_ERROR) << "ChannelSend::SendData() failed to send data to RTP/RTCP module"; @@ -868,9 +918,30 @@ void ChannelSend::ProcessAndEncodeAudio( audio_frame->ElapsedProfileTimeMs()); bool is_muted = InputMute(); + static std::atomic standalone_frame_counter{0}; + const int count = ++standalone_frame_counter; AudioFrameOperations::Mute(audio_frame.get(), previous_frame_muted_, is_muted); + const size_t samples_per_packet = + audio_frame->samples_per_channel_ * audio_frame->num_channels_; + const PcmFrameStats pcm_stats = + ComputePcmFrameStats(audio_frame->data(), samples_per_packet); + const std::string capture_ts = audio_frame->absolute_capture_timestamp_ms() + ? std::to_string(*audio_frame->absolute_capture_timestamp_ms()) + : std::string("none"); + RTC_LOG(LS_VERBOSE) + << "ChannelSend::ProcessAndEncodeAudio frame#" << count + << " muted=" << (is_muted ? "true" : "false") + << " samples/ch=" << audio_frame->samples_per_channel_ + << " channels=" << audio_frame->num_channels_ + << " rate=" << audio_frame->sample_rate_hz_ + << " timestamp=" << audio_frame->timestamp_ + << " abs_capture_ms=" << capture_ts + << " pcm_rms=" << pcm_stats.rms + << " pcm_peak=" << pcm_stats.max_abs + << " pcm_all_zero=" << (pcm_stats.all_zero ? "true" : "false"); + if (include_audio_level_indication_.load()) { size_t length = audio_frame->samples_per_channel_ * audio_frame->num_channels_; @@ -889,6 +960,17 @@ void ChannelSend::ProcessAndEncodeAudio( // transmission. Otherwise, it will return without invoking the // callback. int32_t encoded_bytes = audio_coding_->Add10MsData(*audio_frame); + if (count % 100 == 0) { + RTC_LOG(LS_INFO) + << "ChannelSend::ProcessAndEncodeAudio muted=" + << (is_muted ? 
"true" : "false") + << " samples/ch=" << audio_frame->samples_per_channel_ + << " rate=" << audio_frame->sample_rate_hz_ + << " channels=" << audio_frame->num_channels_ + << " encoded_bytes=" << encoded_bytes + << " pcm_rms=" << pcm_stats.rms + << " pcm_peak=" << pcm_stats.max_abs; + } MutexLock lock(&bitrate_accountant_mutex_); if (encoded_bytes < 0) { RTC_DLOG(LS_ERROR) << "ACM::Add10MsData() failed."; diff --git a/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.mm b/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.mm index f895941323..756576ef37 100644 --- a/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.mm +++ b/sdk/objc/api/peerconnection/RTCPCMAudioCapturer.mm @@ -22,6 +22,47 @@ namespace { constexpr size_t kBitsPerSample = 16; constexpr double kChunkDurationMs = 10.0; +struct PCMLevelStats { + double rms; + int32_t maxAbs; + bool allZero; +}; + +struct PCMLevelAccumulator { + long double sumSquares = 0; + int32_t maxAbs = 0; + size_t sampleCount = 0; + bool allZero = true; + + void AddSamples(const int16_t *samples, size_t count) { + if (!samples || count == 0) { + return; + } + sampleCount += count; + for (size_t i = 0; i < count; ++i) { + const int32_t sample = samples[i]; + if (sample != 0) { + allZero = false; + } + const int32_t absSample = sample < 0 ? -sample : sample; + if (absSample > maxAbs) { + maxAbs = absSample; + } + sumSquares += static_cast(sample) * + static_cast(sample); + } + } + + PCMLevelStats Finalize() const { + PCMLevelStats stats; + stats.maxAbs = maxAbs; + stats.allZero = allZero; + stats.rms = sampleCount > 0 + ? std::sqrt(static_cast(sumSquares / sampleCount)) + : 0.0; + return stats; + } +}; } @interface RTC_OBJC_TYPE(RTCPCMAudioCapturer) () { @@ -32,6 +73,9 @@ @interface RTC_OBJC_TYPE(RTCPCMAudioCapturer) () { uint32_t _timestamp; BOOL _hasPendingTimestampMs; double _pendingTimestampMs; + uint32_t _frameCounter; + CFAbsoluteTime _lastFormatResetTime; + size_t _consecutiveZeroFrames; } @end @@ -48,6 +92,9 @@ - (instancetype)initWithDelegate:(id)de _timestamp = 0; _hasPendingTimestampMs = NO; _pendingTimestampMs = 0.0; + _frameCounter = 0; + _lastFormatResetTime = 0; + _consecutiveZeroFrames = 0; } return self; } @@ -113,15 +160,29 @@ - (void)captureBuffer:(CMSampleBufferRef)buffer { BOOL formatChanged = (_sampleRateHz != sampleRate) || (_channels != channels); if (formatChanged) { + const size_t pendingBeforeReset = _pendingData.length; + const CFAbsoluteTime now = CFAbsoluteTimeGetCurrent(); + const double deltaMs = _lastFormatResetTime > 0 + ? 
(now - _lastFormatResetTime) * 1000.0 + : 0.0; _sampleRateHz = sampleRate; _channels = channels; _timestamp = 0; [_pendingData setLength:0]; _hasPendingTimestampMs = NO; + _frameCounter = 0; + _lastFormatResetTime = now; + _consecutiveZeroFrames = 0; + RTCLogInfo(@"RTCPCMAudioCapturer format reset: %d Hz, %lu ch pending=%zuB delta=%.2fms", + _sampleRateHz, + (unsigned long)_channels, + pendingBeforeReset, + deltaMs); } size_t bytesPerFrame = channels * sizeof(int16_t); size_t chunkBytes = framesPerChunk * bytesPerFrame; + const size_t pendingBytesBeforeAppend = _pendingData.length; size_t bufferListSize = 0; OSStatus status = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer( @@ -148,6 +209,37 @@ - (void)captureBuffer:(CMSampleBufferRef)buffer { return; } + size_t totalIncomingBytes = 0; + PCMLevelAccumulator intakeAccumulator; + for (UInt32 i = 0; i < audioBufferList->mNumberBuffers; ++i) { + const AudioBuffer &audioBuffer = audioBufferList->mBuffers[i]; + if (audioBuffer.mData && audioBuffer.mDataByteSize > 0) { + totalIncomingBytes += audioBuffer.mDataByteSize; + intakeAccumulator.AddSamples(static_cast(audioBuffer.mData), + audioBuffer.mDataByteSize / sizeof(int16_t)); + } + } + if (totalIncomingBytes > 0) { + const PCMLevelStats intakeStats = intakeAccumulator.Finalize(); + RTCLogInfo(@"RTCPCMAudioCapturer PCM buffer: rate=%.1fHz channels=%u bits=%u flags=0x%08x incoming=%zuB pendingBefore=%zuB rms=%.2f max=%d zero=%@", + asbd->mSampleRate, + static_cast(asbd->mChannelsPerFrame), + static_cast(asbd->mBitsPerChannel), + static_cast(asbd->mFormatFlags), + totalIncomingBytes, + pendingBytesBeforeAppend, + intakeStats.rms, + intakeStats.maxAbs, + intakeStats.allZero ? @"YES" : @"NO"); + } else { + RTCLogInfo(@"RTCPCMAudioCapturer PCM buffer: rate=%.1fHz channels=%u bits=%u flags=0x%08x incoming=0B pendingBefore=%zuB", + asbd->mSampleRate, + static_cast(asbd->mChannelsPerFrame), + static_cast(asbd->mBitsPerChannel), + static_cast(asbd->mFormatFlags), + pendingBytesBeforeAppend); + } + for (UInt32 i = 0; i < audioBufferList->mNumberBuffers; ++i) { const AudioBuffer &audioBuffer = audioBufferList->mBuffers[i]; if (audioBuffer.mData && audioBuffer.mDataByteSize > 0) { @@ -181,6 +273,29 @@ - (void)captureBuffer:(CMSampleBufferRef)buffer { nextChunkTimestampMs += kChunkDurationMs; } + ++_frameCounter; + const size_t chunkLength = chunk.length; + if (chunkLength != chunkBytes) { + RTCLogWarning(@"RTCPCMAudioCapturer unexpected chunk size: %zuB (expected %zuB) pending=%luB", + chunkLength, + chunkBytes, + (unsigned long)_pendingData.length); + } + PCMLevelAccumulator chunkAccumulator; + chunkAccumulator.AddSamples(static_cast(chunk.bytes), + chunkLength / sizeof(int16_t)); + const PCMLevelStats chunkStats = chunkAccumulator.Finalize(); + RTCLogVerbose(@"RTCPCMAudioCapturer chunk #%u frames=%zu bytes=%zu pending=%luB ts=%u absTs=%@ rms=%.2f max=%d zero=%@", + _frameCounter, + framesPerChunk, + chunkLength, + (unsigned long)_pendingData.length, + _timestamp, + absoluteCaptureTimestamp ? absoluteCaptureTimestamp.stringValue : @"nil", + chunkStats.rms, + chunkStats.maxAbs, + chunkStats.allZero ? @"YES" : @"NO"); + RTC_OBJC_TYPE(RTCAudioFrame) *frame = [[RTC_OBJC_TYPE(RTCAudioFrame) alloc] initWithData:chunk sampleRateHz:_sampleRateHz @@ -188,6 +303,34 @@ - (void)captureBuffer:(CMSampleBufferRef)buffer { framesPerChannel:framesPerChunk timestamp:_timestamp absoluteCaptureTimestampMs:absoluteCaptureTimestamp]; + const NSUInteger expectedFramesPerChannel = + _sampleRateHz > 0 ? 
static_cast(_sampleRateHz / 100) : 0; + if (expectedFramesPerChannel > 0) { + NSAssert(frame.framesPerChannel == expectedFramesPerChannel, + @"Unexpected framesPerChannel %lu (expected %lu) for rate %d", + (unsigned long)frame.framesPerChannel, + (unsigned long)expectedFramesPerChannel, + _sampleRateHz); + } + const NSData *frameData = frame.data; + PCMLevelAccumulator frameAccumulator; + frameAccumulator.AddSamples(static_cast(frameData.bytes), + frameData.length / sizeof(int16_t)); + const PCMLevelStats frameStats = frameAccumulator.Finalize(); + if (frameStats.allZero) { + ++_consecutiveZeroFrames; + } else { + _consecutiveZeroFrames = 0; + } + RTCLogVerbose(@"RTCPCMAudioCapturer push #%u rate=%dHz channels=%lu frames=%lu bytes=%lu rms=%.2f max=%d zeroRun=%zu", + _frameCounter, + frame.sampleRateHz, + (unsigned long)frame.channels, + (unsigned long)frame.framesPerChannel, + (unsigned long)frameData.length, + frameStats.rms, + frameStats.maxAbs, + _consecutiveZeroFrames); [_delegate pushAudioFrame:frame]; _timestamp += framesPerChunk; } diff --git a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm index 71f81b1503..196cb09d46 100644 --- a/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm +++ b/sdk/objc/api/peerconnection/RTCStandaloneAudioSource.mm @@ -13,6 +13,7 @@ #import "RTCAudioFrame.h" #import "RTCAudioSource+Private.h" #import "RTCPeerConnectionFactory+Private.h" +#import "RTCLogging.h" #include @@ -30,6 +31,7 @@ @implementation RTC_OBJC_TYPE(RTCStandaloneAudioSource) { rtc::scoped_refptr _nativeStandaloneSource; rtc::Thread *_signalingThread; + uint32_t _pushCounter; } @synthesize nativeStandaloneSource = _nativeStandaloneSource; @@ -43,6 +45,7 @@ - (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)facto if (self) { _nativeStandaloneSource = std::move(nativeStandaloneSource); _signalingThread = factory.signalingThread; + _pushCounter = 0; } return self; } @@ -61,6 +64,7 @@ - (void)start { return; } + RTCLogInfo(@"RTCStandaloneAudioSource start"); _nativeStandaloneSource->Start(); } @@ -72,6 +76,7 @@ - (void)stop { return; } + RTCLogInfo(@"RTCStandaloneAudioSource stop"); _nativeStandaloneSource->Stop(); } @@ -94,6 +99,31 @@ - (void)pushAudioFrame:(RTC_OBJC_TYPE(RTCAudioFrame) *)frame { frame.absoluteCaptureTimestampMs.longLongValue); } + ++_pushCounter; + if (_pushCounter % 100 == 1) { + const int16_t *samples = static_cast(frame.data.bytes); + const size_t totalSamples = frame.framesPerChannel * frame.channels; + int32_t maxAbs = 0; + int64_t accumAbs = 0; + for (size_t i = 0; i < totalSamples; ++i) { + int32_t value = samples[i]; + if (value < 0) { + value = -value; + } + if (value > maxAbs) { + maxAbs = value; + } + accumAbs += value; + } + const double meanAbs = totalSamples > 0 + ? 
static_cast(accumAbs) / + static_cast(totalSamples) + : 0.0; + RTCLogInfo(@"RTCStandaloneAudioSource push #%u: rate=%d Hz channels=%ld frames/ch=%ld avg_abs=%.1f max_abs=%d", + _pushCounter, frame.sampleRateHz, (long)frame.channels, + (long)frame.framesPerChannel, meanAbs, maxAbs); + } + _nativeStandaloneSource->PushAudioFrame(native_frame); } diff --git a/sdk/objc/native/src/standalone_audio_track_source.cc b/sdk/objc/native/src/standalone_audio_track_source.cc index 60ddf1a76f..4a804f2c37 100644 --- a/sdk/objc/native/src/standalone_audio_track_source.cc +++ b/sdk/objc/native/src/standalone_audio_track_source.cc @@ -11,8 +11,11 @@ #include "sdk/objc/native/src/standalone_audio_track_source.h" #include +#include +#include #include "sdk/objc/native/src/standalone_audio_send_helper.h" +#include "rtc_base/logging.h" namespace webrtc { @@ -30,6 +33,7 @@ StandaloneAudioTrackSource::~StandaloneAudioTrackSource() = default; void StandaloneAudioTrackSource::Start() { bool expected = false; if (started_.compare_exchange_strong(expected, true)) { + RTC_LOG(LS_INFO) << "StandaloneAudioTrackSource::Start"; SetState(MediaSourceInterface::kLive); } } @@ -37,6 +41,7 @@ void StandaloneAudioTrackSource::Start() { void StandaloneAudioTrackSource::Stop() { bool expected = true; if (started_.compare_exchange_strong(expected, false)) { + RTC_LOG(LS_INFO) << "StandaloneAudioTrackSource::Stop"; SetState(MediaSourceInterface::kEnded); } } @@ -50,6 +55,8 @@ void StandaloneAudioTrackSource::AddSink(AudioTrackSinkInterface* sink) { MutexLock lock(&sink_lock_); RTC_DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end()); sinks_.push_back(sink); + RTC_LOG(LS_INFO) << "StandaloneAudioTrackSource::AddSink count=" + << sinks_.size(); } void StandaloneAudioTrackSource::RemoveSink(AudioTrackSinkInterface* sink) { @@ -58,6 +65,8 @@ void StandaloneAudioTrackSource::RemoveSink(AudioTrackSinkInterface* sink) { auto it = std::find(sinks_.begin(), sinks_.end(), sink); if (it != sinks_.end()) { sinks_.erase(it); + RTC_LOG(LS_INFO) << "StandaloneAudioTrackSource::RemoveSink count=" + << sinks_.size(); } } @@ -77,6 +86,30 @@ void StandaloneAudioTrackSource::PushAudioFrame(const AudioFrame& frame) { const int sample_rate = frame.sample_rate_hz(); RTC_DCHECK(audio_data); + static std::atomic frame_counter{0}; + const uint32_t current = ++frame_counter; + if (current % 100 == 0) { + const size_t total_samples = samples_per_channel * num_channels; + int32_t max_abs = 0; + int64_t accum_abs = 0; + for (size_t i = 0; i < total_samples; ++i) { + const int32_t value = audio_data[i]; + const int32_t abs_value = std::abs(value); + max_abs = std::max(max_abs, abs_value); + accum_abs += abs_value; + } + const float mean_abs = total_samples > 0 + ? static_cast(accum_abs) / + static_cast(total_samples) + : 0.0f; + RTC_LOG(LS_INFO) << "StandaloneAudioTrackSource push: rate=" << sample_rate + << "Hz channels=" << num_channels + << " samples/ch=" << samples_per_channel + << " avg_abs=" << mean_abs + << " max_abs=" << max_abs + << " muted=" << (frame.muted() ? 
"true" : "false"); + } + for (auto* sink : sinks_) { sink->OnData(audio_data, kBitsPerSample, sample_rate, num_channels, samples_per_channel, frame.absolute_capture_timestamp_ms()); From 9edda3589ed81a234bd62c4374cf524a929f5192 Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Fri, 26 Sep 2025 14:35:57 +0300 Subject: [PATCH 14/15] Fix build issues --- fastlane/lanes/gclient | 4 +- fastlane/lanes/ios | 59 ++++++++++++++++---- sdk/objc/api/peerconnection/RTCAudioFrame.mm | 4 +- 3 files changed, 53 insertions(+), 14 deletions(-) diff --git a/fastlane/lanes/gclient b/fastlane/lanes/gclient index f6d300fc99..53609d16fe 100644 --- a/fastlane/lanes/gclient +++ b/fastlane/lanes/gclient @@ -21,6 +21,8 @@ private_lane :configure_gclient do |options| execute_command(command: "gclient root", verbose: options[:verbose]) # Configure gclient with the spec + target_oses = Array(options[:target_os]).compact.map(&:to_s) + target_os_entries = target_oses.map { |os| "\"#{os}\"" }.join(', ') gclient_spec = <<~SPEC solutions = [ { @@ -31,7 +33,7 @@ private_lane :configure_gclient do |options| "custom_deps": {}, }, ] - target_os = ["#{options[:target_os]}"] + target_os = [#{target_os_entries}] SPEC # Write spec to temporary file and configure diff --git a/fastlane/lanes/ios b/fastlane/lanes/ios index 731873774f..a134aaa445 100644 --- a/fastlane/lanes/ios +++ b/fastlane/lanes/ios @@ -17,6 +17,8 @@ platform :ios do options[:rename_to_sdk_name] = "Stream#{options[:sdk_name]}" options[:product] = File.join(options[:products_root], "#{options[:rename_to_sdk_name]}.xcframework") options[:match_file] = File.join(options[:root], "src/fastlane/Matchfile") + catalyst_option = options.key?(:maccatalyst_support) ? options[:maccatalyst_support] : true + options[:maccatalyst_support] = !%w[false 0 off no].include?(catalyst_option.to_s.downcase) log_info(message: "Root: #{options[:root]}") log_info(message: "Build root: #{options[:build_root]}") @@ -25,6 +27,7 @@ platform :ios do log_info(message: "Build tool: #{options[:build_tool]}") log_info(message: "Matchfile: #{options[:match_file]}") log_info(message: "Product: #{options[:product]}") + log_info(message: "Mac Catalyst support: #{options[:maccatalyst_support]}") setup_ci if is_ci clean_up_products(options) @@ -35,6 +38,7 @@ platform :ios do rename_product(options) prepare_signing(options) sign_product(options) + verify_signatures(options) zip_product(options) end @@ -76,11 +80,15 @@ platform :ios do end lane :configure_google_client do |options| - lane_options = extract_prefixed_options(options, "configure_google_client_") + lane_options = extract_prefixed_options(options, "configure_google_client") next if lane_options[:skip] == true + target_os = ["ios"] + target_os << "mac" if options[:maccatalyst_support] + log_info(message: "Configuring gclient target_os: #{target_os.join(', ')}") + configure_gclient( - target_os: "ios", + target_os: target_os, verbose: options[:verbose], number_of_jobs: options[:number_of_jobs], output: options[:build_root] @@ -92,6 +100,7 @@ platform :ios do next if lane_options[:skip] == true deployment_target = options[:deployment_target] || "13.0" + maccatalyst_support = options[:maccatalyst_support] args_list = resolve_build_product_args( args: extract_prefixed_options(lane_options, "arg"), @@ -102,6 +111,10 @@ platform :ios do Dir.chdir(options[:build_root]) do command_parts = ["\"#{script_path}\""] command_parts << "--deployment-target #{deployment_target}" + archs = ["device:arm64", "simulator:arm64", "simulator:x64"] + archs += 
["catalyst:arm64", "catalyst:x64"] if maccatalyst_support + command_parts << "--arch" + command_parts.concat(archs) command_parts << "--extra-gn-args" command_parts.concat(args_list) @@ -113,7 +126,7 @@ platform :ios do end lane :move_product do |options| - lane_options = extract_prefixed_options(options, "move_product_") + lane_options = extract_prefixed_options(options, "move_product") next if lane_options[:skip] == true product_source = options[:product_source] @@ -133,7 +146,7 @@ platform :ios do end lane :rename_product do |options| - lane_options = extract_prefixed_options(options, "rename_product_") + lane_options = extract_prefixed_options(options, "rename_product") next if lane_options[:skip] == true product_source = options[:product_source] @@ -199,7 +212,7 @@ platform :ios do lane :prepare_signing do |options| lane_options = extract_prefixed_options(options, "prepare_signing") - next unless lane_options[:skip] != true + next unless lane_options[:skip] == true custom_match( api_key: appstore_api_key, @@ -209,21 +222,45 @@ platform :ios do end lane :sign_product do |options| - lane_options = extract_prefixed_options(options, "sign_product_") + lane_options = extract_prefixed_options(options, "sign_product") next if lane_options[:skip] == true matchfile = options[:match_file] assert(message: "Missing required option :match_file") if matchfile.to_s.strip.empty? team_id = File.read(matchfile).match(/team_id\("(.*)"\)/)[1] - execute_command( - command: "codesign --force --timestamp -v --sign 'Apple Distribution: Stream.io Inc (#{team_id})' #{options[:product]}", - verbose: options[:verbose] - ) + frameworks = Dir.glob("#{options[:product]}/**/*.framework") + frameworks.each do |framework| + execute_command( + command: "/usr/bin/codesign --force --timestamp --deep -v --sign 'Apple Distribution: Stream.io Inc (#{team_id})' \"#{framework}\"", + verbose: options[:verbose] + ) + end + end + + lane :verify_signatures do |options| + lane_options = extract_prefixed_options(options, "verify_signatures") + next if lane_options[:skip] == true + + product_path = options[:product] + assert(message: "Missing required option :product") if product_path.to_s.strip.empty? + assert(message: "Product not found at #{product_path}") unless File.exist?(product_path) + + frameworks = Dir.glob("#{product_path}/**/*.framework") + assert(message: "No frameworks found to validate signatures") if frameworks.empty? 
+ + log_info(message: "Validating code signatures for #{frameworks.count} frameworks") + + frameworks.each do |framework| + execute_command( + command: "/usr/bin/codesign --verify --deep --strict --verbose=2 \"#{framework}\"", + verbose: options[:verbose] + ) + end end lane :zip_product do |options| - lane_options = extract_prefixed_options(options, "zip_product_") + lane_options = extract_prefixed_options(options, "zip_product") next if lane_options[:skip] == true file_path = options[:product] diff --git a/sdk/objc/api/peerconnection/RTCAudioFrame.mm b/sdk/objc/api/peerconnection/RTCAudioFrame.mm index eb4226c432..4e676ef408 100644 --- a/sdk/objc/api/peerconnection/RTCAudioFrame.mm +++ b/sdk/objc/api/peerconnection/RTCAudioFrame.mm @@ -10,8 +10,8 @@ #import "RTCAudioFrame.h" -static inline size_t BytesForFrame(NSUInteger channels, - NSUInteger framesPerChannel) { +[[maybe_unused]] static inline size_t BytesForFrame(NSUInteger channels, + NSUInteger framesPerChannel) { return channels * framesPerChannel * sizeof(int16_t); } From 29e2572bc354cd3c7d2b0f8529765a591b09ce4f Mon Sep 17 00:00:00 2001 From: Ilias Pavlidakis Date: Wed, 1 Oct 2025 12:44:56 +0300 Subject: [PATCH 15/15] Add compute_zip_checksum lane --- fastlane/lanes/ios | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/fastlane/lanes/ios b/fastlane/lanes/ios index a134aaa445..e475e3284f 100644 --- a/fastlane/lanes/ios +++ b/fastlane/lanes/ios @@ -2,6 +2,7 @@ require 'fileutils' require 'pathname' +require 'shellwords' fastlane_require "fastlane-plugin-stream_actions" platform :ios do @@ -272,6 +273,33 @@ platform :ios do zip_path end + desc "Compute a checksum for the packaged xcframework zip" + lane :compute_zip_checksum do |options| + lane_options = extract_prefixed_options(options, "compute_zip_checksum") + next if lane_options[:skip] == true + + ensure_required_tool(tool: "swift", verbose: options[:verbose]) + + zip_path = lane_options[:zip_path] || options[:zip_path] + if zip_path.to_s.strip.empty? + products_root = options[:products_root] + rename_to_sdk_name = options[:rename_to_sdk_name] + zip_path = File.join(products_root, "#{rename_to_sdk_name}.zip") unless products_root.to_s.strip.empty? || rename_to_sdk_name.to_s.strip.empty? + end + + assert(message: "Missing required option :zip_path") if zip_path.to_s.strip.empty? + assert(message: "Zip file not found at #{zip_path}") unless File.exist?(zip_path) + + command = "swift package compute-checksum #{Shellwords.escape(zip_path)}" + checksum = execute_command( + command: command, + verbose: options[:verbose] + ).to_s.strip + + log_info(message: "Checksum for #{zip_path}: #{checksum}") + checksum + end + private_lane :resolve_build_product_args do |options| args = options[:args] || {}