diff --git a/.gitignore b/.gitignore
index 8d6ada89..b70ab983 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,7 @@ dist-ssr
 .env
 .env.production
 .env.test
+target
 
 # Editor directories and files
 .vscode/*
@@ -26,4 +27,4 @@ dist-ssr
 *.sln
 *.sw?
 .turbo
-pnpm-lock.yaml
\ No newline at end of file
+pnpm-lock.yaml
diff --git a/apps/desktop/src-tauri/Cargo.lock b/Cargo.lock
similarity index 100%
rename from apps/desktop/src-tauri/Cargo.lock
rename to Cargo.lock
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 00000000..91e408b4
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,3 @@
+[workspace]
+resolver = "2"
+members = ["apps/desktop/src-tauri", "crates/*"]
diff --git a/apps/desktop/package.json b/apps/desktop/package.json
index f50e766b..d89a1472 100644
--- a/apps/desktop/package.json
+++ b/apps/desktop/package.json
@@ -3,7 +3,7 @@
   "private": true,
   "version": "0.2.7",
   "scripts": {
-    "dev": "tauri dev",
+    "dev": "RUST_BACKTRACE=1 tauri dev",
     "build": "tsc && next build",
     "tauri": "tauri"
   },
diff --git a/apps/desktop/src-tauri/Cargo.toml b/apps/desktop/src-tauri/Cargo.toml
index e2d2f4ba..8378b63f 100644
--- a/apps/desktop/src-tauri/Cargo.toml
+++ b/apps/desktop/src-tauri/Cargo.toml
@@ -14,7 +14,36 @@ tauri-build = { version = "1.5.1", features = [] }
 ffmpeg-sidecar = "0.5.1"
 
 [dependencies]
-tauri = { version = "1.6.1", features = [ "system-tray", "updater", "macos-private-api", "window-set-position", "fs-write-file", "fs-remove-file", "fs-read-file", "fs-rename-file", "fs-exists", "fs-remove-dir", "fs-read-dir", "fs-copy-file", "fs-create-dir", "window-set-ignore-cursor-events", "window-unminimize", "window-minimize", "window-close", "window-show", "window-start-dragging", "window-hide", "window-unmaximize", "window-maximize", "window-set-always-on-top", "shell-open", "devtools", "os-all", "http-all", "icon-png"] }
+tauri = { version = "1.6.1", features = [
+    "system-tray",
+    "updater",
+    "macos-private-api",
+    "window-set-position",
+    "fs-write-file",
+    "fs-remove-file",
+    "fs-read-file",
+    "fs-rename-file",
+    "fs-exists",
+    "fs-remove-dir",
+    "fs-read-dir",
+    "fs-copy-file",
+    "fs-create-dir",
+    "window-set-ignore-cursor-events",
+    "window-unminimize",
+    "window-minimize",
+    "window-close",
+    "window-show",
+    "window-start-dragging",
+    "window-hide",
+    "window-unmaximize",
+    "window-maximize",
+    "window-set-always-on-top",
+    "shell-open",
+    "devtools",
+    "os-all",
+    "http-all",
+    "icon-png",
+] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 tauri-plugin-context-menu = "0.7.0"
@@ -35,7 +64,7 @@ dotenv_codegen = "0.15.0"
 byteorder = "1.4.3"
 bytemuck = "1.14.3"
 regex = "1"
-capture = { path = "./src/capture" }
+capture = { path = "../../../crates/capture" }
 image = "0.24.9"
 sentry = "0.32.2"
 fix-path-env = { git = "https://github.com/tauri-apps/fix-path-env-rs" }
diff --git a/apps/desktop/src-tauri/src/main.rs b/apps/desktop/src-tauri/src/main.rs
index f199f5d6..6cac8159 100644
--- a/apps/desktop/src-tauri/src/main.rs
+++ b/apps/desktop/src-tauri/src/main.rs
@@ -1,27 +1,29 @@
 #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
 
-use std::collections::LinkedList;
-use std::sync::{Arc};
-use std::path::PathBuf;
 use cpal::Devices;
 use regex::Regex;
+use std::collections::LinkedList;
+use std::path::PathBuf;
+use std::sync::atomic::AtomicBool;
+use std::sync::Arc;
+use std::vec;
+use tauri::{
+    CustomMenuItem, Manager, SystemTray, SystemTrayEvent, SystemTrayMenu, SystemTraySubmenu, Window,
+};
+use tauri_plugin_oauth::start;
+use tauri_plugin_positioner::{Position, WindowExt};
 use tokio::sync::Mutex;
-use std::sync::atomic::{AtomicBool};
-use std::{vec};
-use tauri::{CustomMenuItem, Manager, SystemTray, SystemTrayEvent, SystemTrayMenu, SystemTraySubmenu, Window};
-use window_vibrancy::{apply_blur, apply_vibrancy, NSVisualEffectMaterial};
 use window_shadows::set_shadow;
-use tauri_plugin_positioner::{WindowExt, Position};
-use tauri_plugin_oauth::start;
+use window_vibrancy::{apply_blur, apply_vibrancy, NSVisualEffectMaterial};
 
+mod media;
 mod recording;
 mod upload;
 mod utils;
-mod media;
 
-use recording::{RecordingState, start_dual_recording, stop_all_recordings};
-use media::{enumerate_audio_devices};
-use utils::{has_screen_capture_access};
+use media::enumerate_audio_devices;
+use recording::{start_dual_recording, stop_all_recordings, RecordingState};
+use utils::has_screen_capture_access;
 
 use ffmpeg_sidecar::{
     command::ffmpeg_is_installed,
diff --git a/apps/desktop/src-tauri/src/media.rs b/apps/desktop/src-tauri/src/media.rs
index 436eaa0c..0f1ff1ce 100644
--- a/apps/desktop/src-tauri/src/media.rs
+++ b/apps/desktop/src-tauri/src/media.rs
@@ -1,55 +1,51 @@
-use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
+use byteorder::{ ByteOrder, LittleEndian };
+use cpal::traits::{ DeviceTrait, HostTrait, StreamTrait };
 use cpal::SampleFormat;
-use std::process::{Stdio};
-use byteorder::{ByteOrder, LittleEndian};
-use std::sync::{Arc, atomic::{AtomicBool, Ordering}};
-use std::io::{ErrorKind::WouldBlock, Error};
-use std::time::{Instant, Duration};
-use std::path::Path;
-use image::{ImageBuffer, Rgba, ImageFormat};
 use image::codecs::jpeg::JpegEncoder;
-
-use tokio::io::{AsyncWriteExt};
-use tokio::process::{Command, Child, ChildStdin};
-use tokio::sync::{mpsc, Mutex};
+use image::{ ImageBuffer, ImageFormat, Rgba };
+use std::io::{ Error, ErrorKind::WouldBlock };
+use std::path::{ Path, PathBuf };
+use std::process::Stdio;
+use std::sync::{ atomic::{ AtomicBool, Ordering }, Arc };
+use std::time::{ Duration, Instant };
+use tokio::fs::File;
+
+use tokio::io::AsyncWriteExt;
+use tokio::process::{ Child, ChildStdin, Command };
+use tokio::sync::{ mpsc, Mutex };
 use tokio::try_join;
 
 use crate::recording::RecordingOptions;
-use crate::utils::{ffmpeg_path_as_str};
-use crate::upload::upload_file;
-use capture::{Capturer, Display};
+use crate::upload::{ self, upload_file };
+use crate::utils::{ create_named_pipe, ffmpeg_path_as_str };
+use capture::{ Capturer, Display };
 
 const FRAME_RATE: u64 = 30;
 
 pub struct MediaRecorder {
     pub options: Option<RecordingOptions>,
-    ffmpeg_audio_process: Option<Child>,
-    ffmpeg_video_process: Option<Child>,
-    ffmpeg_audio_stdin: Option<Arc<Mutex<Option<ChildStdin>>>>,
-    ffmpeg_video_stdin: Option<Arc<Mutex<Option<ChildStdin>>>>,
+    ffmpeg_process: Option<Child>,
+    ffmpeg_stdin: Option<Arc<Mutex<Option<ChildStdin>>>>,
     device_name: Option<String>,
-    stream: Option<cpal::Stream>,
+    audio_stream: Option<cpal::Stream>,
     audio_channel_sender: Option<mpsc::Sender<Vec<u8>>>,
     audio_channel_receiver: Option<mpsc::Receiver<Vec<u8>>>,
     video_channel_sender: Option<mpsc::Sender<Vec<u8>>>,
     video_channel_receiver: Option<mpsc::Receiver<Vec<u8>>>,
    should_stop: Arc<AtomicBool>,
    start_time: Option<Instant>,
-    audio_file_path: Option<String>,
-    video_file_path: Option<String>,
+    audio_file_path: Option<PathBuf>,
+    video_file_path: Option<PathBuf>,
 }
 
 impl MediaRecorder {
-
     pub fn new() -> Self {
         MediaRecorder {
             options: None,
-            ffmpeg_audio_process: None,
-            ffmpeg_video_process: None,
-            ffmpeg_audio_stdin: None,
-            ffmpeg_video_stdin: None,
+            ffmpeg_process: None,
+            ffmpeg_stdin: None,
             device_name: None,
-            stream: None,
+            audio_stream: None,
             audio_channel_sender: None,
             audio_channel_receiver: None,
             video_channel_sender: None,
@@ -61,7 +57,16 @@ impl MediaRecorder {
         }
     }
 
-    pub async fn start_media_recording(&mut 
self, options: RecordingOptions, audio_file_path: &str, video_file_path: &str, screenshot_file_path: &str, custom_device: Option<&str>, max_screen_width: usize, max_screen_height: usize) -> Result<(), String> { + pub async fn start_media_recording( + &mut self, + options: RecordingOptions, + screenshot_dir: &Path, + audio_chunks_dir: &Path, + video_chunks_dir: &Path, + custom_device: Option<&str>, + max_screen_width: usize, + max_screen_height: usize + ) -> Result<(), String> { self.options = Some(options.clone()); println!("Custom device: {:?}", custom_device); @@ -93,8 +98,7 @@ impl MediaRecorder { self.audio_channel_receiver = Some(audio_rx); self.video_channel_sender = Some(video_tx); self.video_channel_receiver = Some(video_rx); - self.ffmpeg_audio_stdin = Some(Arc::new(Mutex::new(None))); - self.ffmpeg_video_stdin = Some(Arc::new(Mutex::new(None))); + self.ffmpeg_stdin = Some(Arc::new(Mutex::new(None))); let audio_channel_sender = self.audio_channel_sender.clone(); let video_channel_sender = self.video_channel_sender.clone(); @@ -115,20 +119,36 @@ impl MediaRecorder { let device = if let Some(custom_device_name) = custom_device { input_devices - .find(|d| d.name().map(|name| name == custom_device_name).unwrap_or(false)) - .unwrap_or_else(|| host.default_input_device().expect("No default input device available")) + .find(|d| { + d.name() + .map(|name| name == custom_device_name) + .unwrap_or(false) + }) + .unwrap_or_else(|| { + host.default_input_device().expect("No default input device available") + }) } else { host.default_input_device().expect("No default input device available") }; println!("Using audio device: {}", device.name().expect("Failed to get device name")); - let config = device.supported_input_configs() + let config = device + .supported_input_configs() .expect("Failed to get supported input configs") - .find(|c| c.sample_format() == SampleFormat::F32 || c.sample_format() == SampleFormat::I16 || c.sample_format() == SampleFormat::I8 || c.sample_format() == SampleFormat::I32) - .unwrap_or_else(|| - device.supported_input_configs().expect("Failed to get supported input configs").next().expect("No supported input config") - ) + .find(|c| { + c.sample_format() == SampleFormat::F32 || + c.sample_format() == SampleFormat::I16 || + c.sample_format() == SampleFormat::I8 || + c.sample_format() == SampleFormat::I32 + }) + .unwrap_or_else(|| { + device + .supported_input_configs() + .expect("Failed to get supported input configs") + .next() + .expect("No supported input config") + }) .with_max_sample_rate(); let sample_rate = config.sample_rate().0; @@ -149,139 +169,145 @@ impl MediaRecorder { println!("FFmpeg binary path: {}", ffmpeg_binary_path_str); - let audio_file_path_owned = audio_file_path.to_owned(); - let video_file_path_owned = video_file_path.to_owned(); let sample_rate_str = sample_rate.to_string(); let channels_str = channels.to_string(); - let ffmpeg_audio_stdin = self.ffmpeg_audio_stdin.clone(); - let ffmpeg_video_stdin = self.ffmpeg_video_stdin.clone(); - let err_fn = move |err| { eprintln!("an error occurred on stream: {}", err); }; - if custom_device != Some("None") { + let needs_audio = custom_device != Some("None"); + + if needs_audio { println!("Building input stream..."); - let stream_result: Result = match config.sample_format() { - SampleFormat::I8 => device.build_input_stream( - &config.into(), - { - let audio_start_time = Arc::clone(&audio_start_time); - move |data: &[i8], _: &_| { - let mut first_frame_time_guard = audio_start_time.try_lock(); - - let 
bytes = data.iter().map(|&sample| sample as u8).collect::>(); - if let Some(sender) = &audio_channel_sender { - if sender.try_send(bytes).is_err() { - eprintln!("Channel send error. Dropping data."); + let stream_result: Result = match + config.sample_format() + { + SampleFormat::I8 => + device.build_input_stream( + &config.into(), + { + let audio_start_time = Arc::clone(&audio_start_time); + move |data: &[i8], _: &_| { + let mut first_frame_time_guard = audio_start_time.try_lock(); + + let bytes = data + .iter() + .map(|&sample| sample as u8) + .collect::>(); + if let Some(sender) = &audio_channel_sender { + if sender.try_send(bytes).is_err() { + eprintln!("Channel send error. Dropping data."); + } + } + + if let Ok(ref mut start_time_option) = first_frame_time_guard { + if start_time_option.is_none() { + **start_time_option = Some(Instant::now()); + + println!("Audio start time captured"); + } + } } - } - - if let Ok(ref mut start_time_option) = first_frame_time_guard { - if start_time_option.is_none() { - **start_time_option = Some(Instant::now()); - - println!("Audio start time captured"); - } - } - } - }, - err_fn, - None, - ), - SampleFormat::I16 => device.build_input_stream( - &config.into(), - { - let audio_start_time = Arc::clone(&audio_start_time); - move |data: &[i16], _: &_| { - let mut first_frame_time_guard = audio_start_time.try_lock(); - - let mut bytes = vec![0; data.len() * 2]; - LittleEndian::write_i16_into(data, &mut bytes); - if let Some(sender) = &audio_channel_sender { - if sender.try_send(bytes).is_err() { - eprintln!("Channel send error. Dropping data."); - } - } - - if let Ok(ref mut start_time_option) = first_frame_time_guard { - if start_time_option.is_none() { - **start_time_option = Some(Instant::now()); - - println!("Audio start time captured"); - } - } - } - }, - err_fn, - None, - ), - SampleFormat::I32 => device.build_input_stream( - &config.into(), - { - let audio_start_time = Arc::clone(&audio_start_time); - move |data: &[i32], _: &_| { - let mut first_frame_time_guard = audio_start_time.try_lock(); - - let mut bytes = vec![0; data.len() * 2]; - LittleEndian::write_i32_into(data, &mut bytes); - if let Some(sender) = &audio_channel_sender { - if sender.try_send(bytes).is_err() { - eprintln!("Channel send error. Dropping data."); - } - } - - if let Ok(ref mut start_time_option) = first_frame_time_guard { - if start_time_option.is_none() { - **start_time_option = Some(Instant::now()); - - println!("Audio start time captured"); - } - } - } - }, - err_fn, - None, - ), - SampleFormat::F32 => device.build_input_stream( - &config.into(), - { - let audio_start_time = Arc::clone(&audio_start_time); - move |data: &[f32], _: &_| { - let mut first_frame_time_guard = audio_start_time.try_lock(); - - let mut bytes = vec![0; data.len() * 4]; - LittleEndian::write_f32_into(data, &mut bytes); - if let Some(sender) = &audio_channel_sender { - if sender.try_send(bytes).is_err() { - eprintln!("Channel send error. 
Dropping data."); - } - } - - if let Ok(ref mut start_time_option) = first_frame_time_guard { - if start_time_option.is_none() { - **start_time_option = Some(Instant::now()); - - println!("Audio start time captured"); - } - } - } - }, - err_fn, - None, - ), - _sample_format => Err(cpal::BuildStreamError::DeviceNotAvailable), + }, + err_fn, + None + ), + SampleFormat::I16 => + device.build_input_stream( + &config.into(), + { + let audio_start_time = Arc::clone(&audio_start_time); + move |data: &[i16], _: &_| { + let mut first_frame_time_guard = audio_start_time.try_lock(); + + let mut bytes = vec![0; data.len() * 2]; + LittleEndian::write_i16_into(data, &mut bytes); + if let Some(sender) = &audio_channel_sender { + if sender.try_send(bytes).is_err() { + eprintln!("Channel send error. Dropping data."); + } + } + + if let Ok(ref mut start_time_option) = first_frame_time_guard { + if start_time_option.is_none() { + **start_time_option = Some(Instant::now()); + + println!("Audio start time captured"); + } + } + } + }, + err_fn, + None + ), + SampleFormat::I32 => + device.build_input_stream( + &config.into(), + { + let audio_start_time = Arc::clone(&audio_start_time); + move |data: &[i32], _: &_| { + let mut first_frame_time_guard = audio_start_time.try_lock(); + + let mut bytes = vec![0; data.len() * 2]; + LittleEndian::write_i32_into(data, &mut bytes); + if let Some(sender) = &audio_channel_sender { + if sender.try_send(bytes).is_err() { + eprintln!("Channel send error. Dropping data."); + } + } + + if let Ok(ref mut start_time_option) = first_frame_time_guard { + if start_time_option.is_none() { + **start_time_option = Some(Instant::now()); + + println!("Audio start time captured"); + } + } + } + }, + err_fn, + None + ), + SampleFormat::F32 => + device.build_input_stream( + &config.into(), + { + let audio_start_time = Arc::clone(&audio_start_time); + move |data: &[f32], _: &_| { + let mut first_frame_time_guard = audio_start_time.try_lock(); + + let mut bytes = vec![0; data.len() * 4]; + LittleEndian::write_f32_into(data, &mut bytes); + if let Some(sender) = &audio_channel_sender { + if sender.try_send(bytes).is_err() { + eprintln!("Channel send error. 
Dropping data."); + } + } + + if let Ok(ref mut start_time_option) = first_frame_time_guard { + if start_time_option.is_none() { + **start_time_option = Some(Instant::now()); + + println!("Audio start time captured"); + } + } + } + }, + err_fn, + None + ), + _sample_format => Err(cpal::BuildStreamError::DeviceNotAvailable), }; let stream = stream_result.map_err(|_| "Failed to build input stream")?; - self.stream = Some(stream); + self.audio_stream = Some(stream); self.trigger_play()?; } let video_start_time_clone = Arc::clone(&video_start_time); - let screenshot_file_path_owned = format!("{}/screen-capture.jpg", screenshot_file_path); + let screenshot_file_path = screenshot_dir.join("screen-capture.jpg"); let capture_frame_at = Duration::from_secs(3); std::thread::spawn(move || { @@ -292,7 +318,11 @@ impl MediaRecorder { _ => false, }; - let mut capturer = Capturer::new(Display::primary().expect("Failed to find primary display"), w.try_into().unwrap(), h.try_into().unwrap()).expect("Failed to start capture"); + let mut capturer = Capturer::new( + Display::primary().expect("Failed to find primary display"), + w.try_into().unwrap(), + h.try_into().unwrap() + ).expect("Failed to start capture"); let fps = FRAME_RATE; let spf = Duration::from_nanos(1_000_000_000 / fps); @@ -309,11 +339,18 @@ impl MediaRecorder { if now >= time_next { match capturer.frame() { Ok(frame) => { - let mut frame_data = Vec::with_capacity(capture_size.try_into().unwrap()); + let mut frame_data = Vec::with_capacity( + capture_size.try_into().unwrap() + ); for row in 0..adjusted_height { - let padded_stride = frame.stride_override().unwrap_or(calculated_stride); - assert!(padded_stride >= calculated_stride, "Image stride with padding should not be smaller than calculated bytes per row"); + let padded_stride = frame + .stride_override() + .unwrap_or(calculated_stride); + assert!( + padded_stride >= calculated_stride, + "Image stride with padding should not be smaller than calculated bytes per row" + ); // Each row should skip the padding of the previous row let start = row * padded_stride; // Each row should stop before/trim off its padding, for compatibility with software that doesn't follow arbitrary padding. 
@@ -323,44 +360,66 @@ impl MediaRecorder { if now - start_time >= capture_frame_at && !screenshot_captured { screenshot_captured = true; - let screenshot_file_path_owned_cloned = screenshot_file_path_owned.clone(); let mut frame_data_clone = frame_data.clone(); + let screenshot_file_path = screenshot_file_path.clone(); std::thread::spawn(move || { for chunk in frame_data_clone.chunks_mut(4) { chunk.swap(0, 2); } - let path = Path::new(&screenshot_file_path_owned_cloned); - let image: ImageBuffer, Vec> = ImageBuffer::from_raw( + let image: ImageBuffer< + Rgba, + Vec + > = ImageBuffer::from_raw( adjusted_width.try_into().unwrap(), adjusted_height.try_into().unwrap(), frame_data_clone ).expect("Failed to create image buffer"); - let mut output_file = std::fs::File::create(&path).expect("Failed to create output file"); - let mut encoder = JpegEncoder::new_with_quality(&mut output_file, 20); + let mut output_file = std::fs::File + ::create(&screenshot_file_path) + .expect("Failed to create output file"); + let mut encoder = JpegEncoder::new_with_quality( + &mut output_file, + 20 + ); if let Err(e) = encoder.encode_image(&image) { eprintln!("Failed to save screenshot: {}", e); } else { + println!( + "Screenshot captured and saved to {:?}", + screenshot_file_path + ); + if !is_local_mode { let rt = tokio::runtime::Runtime::new().unwrap(); - let screenshot_file_path_owned_cloned_copy = screenshot_file_path_owned_cloned.clone(); - rt.block_on(async { - let upload_task = tokio::spawn(upload_file(Some(options_clone), screenshot_file_path_owned_cloned_copy.clone(), "screenshot".to_string())); + rt.block_on(async move { + let upload_task = tokio::spawn( + upload_file( + Some(options_clone), + screenshot_file_path.clone(), + upload::FileType::Screenshot + ) + ); match upload_task.await { - Ok(result) => { + Ok(result) => match result { - Ok(_) => println!("Screenshot captured and saved to {:?}", path), - Err(e) => eprintln!("Failed to upload file: {}", e), + Ok(_) => + println!( + "Screenshot captured and saved to {:?}", + screenshot_file_path + ), + Err(e) => + eprintln!("Failed to upload file: {}", e), } - }, - Err(e) => eprintln!("Failed to join task: {}", e), + Err(e) => { + eprintln!("Failed to join task: {}", e) + } } }); } - println!("Screenshot captured and saved to {:?}", path); } }); } @@ -382,15 +441,15 @@ impl MediaRecorder { } frame_count += 1; - }, + } Err(error) if error.kind() == WouldBlock => { std::thread::sleep(Duration::from_millis(1)); continue; - }, + } Err(error) => { eprintln!("Capture error: {}", error); break; - }, + } } time_next += spf; @@ -404,15 +463,15 @@ impl MediaRecorder { } let elapsed_total_time = start_time.elapsed(); - let fps = frame_count as f64 / elapsed_total_time.as_secs_f64(); + let fps = (frame_count as f64) / elapsed_total_time.as_secs_f64(); println!("Current FPS: {}", fps); }); println!("Starting audio recording and processing..."); - let audio_output_chunk_pattern = format!("{}/audio_recording_%03d.aac", audio_file_path_owned); - let audio_segment_list_filename = format!("{}/segment_list.txt", audio_file_path_owned); - let video_output_chunk_pattern = format!("{}/video_recording_%03d.ts", video_file_path_owned); - let video_segment_list_filename = format!("{}/segment_list.txt", video_file_path_owned); + let audio_chunk_pattern = audio_chunks_dir.join("audio_recording_%03d.aac"); + let video_chunk_pattern = video_chunks_dir.join("video_recording_%03d.ts"); + let audio_segment_list_filename = audio_chunks_dir.join("segment_list.txt"); + let 
video_segment_list_filename = video_chunks_dir.join("segment_list.txt"); let mut audio_filters = Vec::new(); @@ -422,123 +481,121 @@ impl MediaRecorder { audio_filters.push("loudnorm"); - let mut ffmpeg_audio_command: Vec = vec![ - "-f", sample_format, - "-ar", &sample_rate_str, - "-ac", &channels_str, - "-thread_queue_size", "4096", - "-i", "pipe:0", - "-af", "aresample=async=1:min_hard_comp=0.100000:first_pts=0", - "-c:a", "aac", - "-b:a", "128k", - "-async", "1", - "-f", "segment", - "-segment_time", "3", - "-segment_time_delta", "0.01", - "-segment_list", &audio_segment_list_filename, - "-reset_timestamps", "1", - &audio_output_chunk_pattern, - ].into_iter().map(|s| s.to_string()).collect(); - - let mut ffmpeg_video_command: Vec = vec![ - "-f", "rawvideo", - "-pix_fmt", "bgra", - "-s", &format!("{}x{}", adjusted_width, adjusted_height), - "-r", "30", - "-thread_queue_size", "4096", - "-i", "pipe:0", - "-vf", "fps=30,scale=in_range=full:out_range=limited", - "-c:v", "libx264", - "-preset", "ultrafast", - "-pix_fmt", "yuv420p", - "-tune", "zerolatency", - "-vsync", "1", - "-force_key_frames", "expr:gte(t,n_forced*3)", - "-f", "segment", - "-segment_time", "3", - "-segment_time_delta", "0.01", - "-segment_list", &video_segment_list_filename, - "-segment_format", "ts", - "-movflags", "frag_keyframe+empty_moov", - "-reset_timestamps", "1", - &video_output_chunk_pattern, - ].into_iter().map(|s| s.to_string()).collect(); - - if custom_device != Some("None") { + let video_pipe_path = video_chunks_dir.join("pipe.pipe"); + + std::fs::remove_file(&video_pipe_path).ok(); + create_named_pipe(&video_pipe_path).map_err(|e| e.to_string())?; + + let audio_pipe_path = audio_chunks_dir.join("pipe.pipe"); + + std::fs::remove_file(&audio_pipe_path).ok(); + create_named_pipe(&audio_pipe_path).map_err(|e| e.to_string())?; + + let time_offset = if needs_audio { println!("Adjusting FFmpeg commands based on start times..."); - adjust_ffmpeg_commands_based_on_start_times( - Arc::clone(&audio_start_time), - Arc::clone(&video_start_time), - &mut ffmpeg_audio_command, - &mut ffmpeg_video_command, - ).await; - } + create_time_offset_args(&audio_start_time, &video_start_time).await + } else { + None + }; - println!("Starting FFmpeg audio and video processes..."); + let size = format!("{}x{}", adjusted_width, adjusted_height); - let mut audio_stdin: Option = None; - let mut audio_child: Option = None; + let mut ffmpeg_command = Command::new(ffmpeg_binary_path_str); - if custom_device != Some("None") { - let (child, stdin) = self.start_audio_ffmpeg_processes(&ffmpeg_binary_path_str, &ffmpeg_audio_command).await.map_err(|e| e.to_string())?; - audio_child = Some(child); - audio_stdin = Some(stdin); - println!("Audio process started"); + if let Some((TimeOffsetTarget::Video, args)) = &time_offset { + ffmpeg_command.args(args); } - let (video_child, video_stdin) = self.start_video_ffmpeg_processes(&ffmpeg_binary_path_str, &ffmpeg_video_command).await.map_err(|e| e.to_string())?; - println!("Video process started"); + ffmpeg_command + // video in + .args(["-f", "rawvideo", "-pix_fmt", "bgra"]) + .args(["-s", &size, "-r", "30"]) + .args(["-thread_queue_size", "4096", "-i"]) + .arg(&video_pipe_path) + // video out + .args(["-vf", "fps=30,scale=in_range=full:out_range=limited"]) + .args(["-c:v", "libx264", "-preset", "ultrafast"]) + .args(["-pix_fmt", "yuv420p", "-tune", "zerolatency"]) + .args(["-vsync", "1", "-force_key_frames", "expr:gte(t,n_forced*3)"]) + .args(["-f", "segment", "-movflags", "frag_keyframe+empty_moov"]) + 
.args(["-reset_timestamps", "1", "-an"]) + .args(["-segment_time", "3"]) + .args(["-segment_format", "ts"]) + .args(["-segment_time_delta", "0.01", "-segment_list"]) + .args([&video_segment_list_filename, &video_chunk_pattern]); + + if needs_audio { + if let Some((TimeOffsetTarget::Audio, args)) = &time_offset { + ffmpeg_command.args(args); + } - if let Some(ffmpeg_audio_stdin) = &self.ffmpeg_audio_stdin { - let mut audio_stdin_lock = ffmpeg_audio_stdin.lock().await; - *audio_stdin_lock = audio_stdin; - drop(audio_stdin_lock); - println!("Audio stdin set"); + ffmpeg_command + // audio in + .args(["-f", sample_format, "-ar", &sample_rate_str]) + .args(["-ac", &channels_str, "-thread_queue_size", "4096", "-i"]) + .arg(&audio_pipe_path) + // out + .args(["-af", "aresample=async=1:min_hard_comp=0.100000:first_pts=0"]) + .args(["-codec:a", "aac", "-b:a", "128k"]) + .args(["-async", "1", "-f", "segment"]) + .args(["-segment_time", "3", "-segment_time_delta", "0.01"]) + .args(["-reset_timestamps", "1", "-vn", "-segment_list"]) + .args([&audio_segment_list_filename, &audio_chunk_pattern]); } - if let Some(ffmpeg_video_stdin) = &self.ffmpeg_video_stdin { - let mut video_stdin_lock = ffmpeg_video_stdin.lock().await; - *video_stdin_lock = Some(video_stdin); - drop(video_stdin_lock); - println!("Video stdin set"); + println!("Starting FFmpeg process..."); + + let (ffmpeg_child, ffmpeg_stdin) = self + .start_ffmpeg_process(ffmpeg_command).await + .map_err(|e| e.to_string())?; + println!("Ffmpeg process started"); + + if let Some(ffmpeg_stdin_mutex) = &self.ffmpeg_stdin { + let mut stdin_lock = ffmpeg_stdin_mutex.lock().await; + *stdin_lock = Some(ffmpeg_stdin); + drop(stdin_lock); + println!("Ffmpeg stdin set"); } - if custom_device != Some("None") { + if needs_audio { println!("Starting audio channel senders..."); + tokio::spawn(async move { - while let Some(bytes) = &audio_channel_receiver.lock().await.as_mut().unwrap().recv().await { - if let Some(audio_stdin_arc) = &ffmpeg_audio_stdin{ - let mut audio_stdin_guard = audio_stdin_arc.lock().await; - if let Some(ref mut stdin) = *audio_stdin_guard { - stdin.write_all(&bytes).await.expect("Failed to write audio data to FFmpeg stdin"); - } - drop(audio_stdin_guard); - } + let mut audio_pipe = File::create(audio_pipe_path).await.unwrap(); + + while + let Some(bytes) = &audio_channel_receiver + .lock().await + .as_mut() + .unwrap() + .recv().await + { + audio_pipe + .write_all(&bytes).await + .expect("Failed to write audio data to FFmpeg stdin"); } }); } println!("Starting video channel senders..."); tokio::spawn(async move { - while let Some(bytes) = &video_channel_receiver.lock().await.as_mut().unwrap().recv().await { - if let Some(video_stdin_arc) = &ffmpeg_video_stdin { - let mut video_stdin_guard = video_stdin_arc.lock().await; - if let Some(ref mut stdin) = *video_stdin_guard { - stdin.write_all(&bytes).await.expect("Failed to write video data to FFmpeg stdin"); - } - drop(video_stdin_guard); - } + let mut pipe = File::create(video_pipe_path).await.unwrap(); + + while + let Some(bytes) = &video_channel_receiver + .lock().await + .as_mut() + .unwrap() + .recv().await + { + pipe.write_all(&bytes).await.expect("Failed to write video data to FFmpeg stdin"); } }); - if custom_device != Some("None") { - self.ffmpeg_audio_process = audio_child; - } - self.start_time = Some(Instant::now()); - self.audio_file_path = Some(audio_file_path_owned); - self.video_file_path = Some(video_file_path_owned); - self.ffmpeg_video_process = Some(video_child); + 
self.audio_file_path = Some(audio_chunks_dir.to_path_buf()); + self.video_file_path = Some(video_chunks_dir.to_path_buf()); + self.ffmpeg_process = Some(ffmpeg_child); self.device_name = Some(device.name().expect("Failed to get device name")); println!("End of the start_audio_recording function"); @@ -546,8 +603,8 @@ impl MediaRecorder { Ok(()) } - pub fn trigger_play (&mut self) -> Result<(), &'static str> { - if let Some(ref mut stream) = self.stream { + pub fn trigger_play(&mut self) -> Result<(), &'static str> { + if let Some(ref mut stream) = self.audio_stream { stream.play().map_err(|_| "Failed to play stream")?; println!("Audio recording playing."); } else { @@ -564,17 +621,24 @@ impl MediaRecorder { let expected_segments = recording_duration.as_secs() / segment_duration.as_secs(); let audio_file_path = self.audio_file_path.as_ref().ok_or("Audio file path not set")?; let video_file_path = self.video_file_path.as_ref().ok_or("Video file path not set")?; - let audio_segment_list_filename = format!("{}/segment_list.txt", audio_file_path); - let video_segment_list_filename = format!("{}/segment_list.txt", video_file_path); + let audio_segment_list_filename = audio_file_path.join("segment_list.txt"); + let video_segment_list_filename = video_file_path.join("segment_list.txt"); loop { - let audio_segments = std::fs::read_to_string(&audio_segment_list_filename).unwrap_or_default(); - let video_segments = std::fs::read_to_string(&video_segment_list_filename).unwrap_or_default(); + let audio_segments = std::fs + ::read_to_string(&audio_segment_list_filename) + .unwrap_or_default(); + let video_segments = std::fs + ::read_to_string(&video_segment_list_filename) + .unwrap_or_default(); let audio_segment_count = audio_segments.lines().count(); let video_segment_count = video_segments.lines().count(); - if audio_segment_count >= expected_segments as usize && video_segment_count >= expected_segments as usize { + if + audio_segment_count >= (expected_segments as usize) && + video_segment_count >= (expected_segments as usize) + { println!("All segments generated"); break; } @@ -583,23 +647,13 @@ impl MediaRecorder { } } - if let Some(ref ffmpeg_audio_stdin) = self.ffmpeg_audio_stdin { - let mut audio_stdin_guard = ffmpeg_audio_stdin.lock().await; - if let Some(mut audio_stdin) = audio_stdin_guard.take() { - if let Err(e) = audio_stdin.write_all(b"q\n").await { - eprintln!("Failed to send 'q' to audio FFmpeg process: {}", e); - } - let _ = audio_stdin.shutdown().await.map_err(|e| e.to_string()); - } - } - - if let Some(ref ffmpeg_video_stdin) = self.ffmpeg_video_stdin { - let mut video_stdin_guard = ffmpeg_video_stdin.lock().await; - if let Some(mut video_stdin) = video_stdin_guard.take() { - if let Err(e) = video_stdin.write_all(b"q\n").await { + if let Some(ref ffmpeg_stdin) = self.ffmpeg_stdin { + let mut stdin_guard = ffmpeg_stdin.lock().await; + if let Some(mut stdin) = stdin_guard.take() { + if let Err(e) = stdin.write_all(b"q\n").await { eprintln!("Failed to send 'q' to video FFmpeg process: {}", e); } - let _ = video_stdin.shutdown().await.map_err(|e| e.to_string()); + let _ = stdin.shutdown().await.map_err(|e| e.to_string()); } } @@ -613,18 +667,14 @@ impl MediaRecorder { drop(sender); } - if let Some(ref mut stream) = self.stream { + if let Some(ref mut stream) = self.audio_stream { stream.pause().map_err(|_| "Failed to pause stream")?; println!("Audio recording paused."); } else { return Err("Original recording was not started".to_string()); } - if let Some(process) = &mut 
self.ffmpeg_audio_process { - let _ = process.kill().await.map_err(|e| e.to_string()); - } - - if let Some(process) = &mut self.ffmpeg_video_process { + if let Some(process) = &mut self.ffmpeg_process { let _ = process.kill().await.map_err(|e| e.to_string()); } @@ -632,30 +682,8 @@ impl MediaRecorder { Ok(()) } - async fn start_audio_ffmpeg_processes( - &self, - ffmpeg_binary_path: &str, - audio_ffmpeg_command: &[String], - ) -> Result<(Child, ChildStdin), Error> { - let mut audio_process = start_recording_process(ffmpeg_binary_path, audio_ffmpeg_command).await.map_err(|e| { - eprintln!("Failed to start audio recording process: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, e.to_string()) - })?; - - let audio_stdin = audio_process.stdin.take().ok_or_else(|| { - eprintln!("Failed to take audio stdin"); - std::io::Error::new(std::io::ErrorKind::Other, "Failed to take audio stdin") - })?; - - Ok((audio_process, audio_stdin)) - } - - async fn start_video_ffmpeg_processes( - &self, - ffmpeg_binary_path: &str, - video_ffmpeg_command: &[String], - ) -> Result<(Child, ChildStdin), Error> { - let mut video_process = start_recording_process(ffmpeg_binary_path, video_ffmpeg_command).await.map_err(|e| { + async fn start_ffmpeg_process(&self, cmd: Command) -> Result<(Child, ChildStdin), Error> { + let mut video_process = start_recording_process(cmd).await.map_err(|e| { eprintln!("Failed to start video recording process: {}", e); std::io::Error::new(std::io::ErrorKind::Other, e.to_string()) })?; @@ -667,7 +695,6 @@ impl MediaRecorder { Ok((video_process, video_stdin)) } - } #[tauri::command] @@ -695,20 +722,15 @@ pub fn enumerate_audio_devices() -> Vec { input_device_names } -use tokio::io::{BufReader, AsyncBufReadExt}; +use tokio::io::{ AsyncBufReadExt, BufReader }; async fn start_recording_process( - ffmpeg_binary_path_str: &str, - args: &[String], + mut cmd: Command ) -> Result { - let mut process = Command::new(ffmpeg_binary_path_str) - .args(args) - .stdin(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn()?; + let mut process = cmd.stdin(Stdio::piped()).stderr(Stdio::piped()).spawn()?; if let Some(process_stderr) = process.stderr.take() { - tokio::spawn(async move { + tokio::spawn(async move { let mut process_reader = BufReader::new(process_stderr).lines(); while let Ok(Some(line)) = process_reader.next_line().await { eprintln!("FFmpeg process STDERR: {}", line); @@ -720,8 +742,8 @@ async fn start_recording_process( } async fn wait_for_start_times( - audio_start_time: Arc>>, - video_start_time: Arc>>, + audio_start_time: &Mutex>, + video_start_time: &Mutex> ) -> (Instant, Instant) { loop { let audio_start_locked = audio_start_time.lock().await; @@ -738,12 +760,15 @@ async fn wait_for_start_times( } } -async fn adjust_ffmpeg_commands_based_on_start_times( - audio_start_time: Arc>>, - video_start_time: Arc>>, - ffmpeg_audio_command: &mut Vec, - ffmpeg_video_command: &mut Vec, -) { +pub enum TimeOffsetTarget { + Audio, + Video, +} + +async fn create_time_offset_args( + audio_start_time: &Mutex>, + video_start_time: &Mutex> +) -> Option<(TimeOffsetTarget, Vec)> { let (audio_start, video_start) = wait_for_start_times(audio_start_time, video_start_time).await; let duration_difference = if audio_start > video_start { audio_start.duration_since(video_start) @@ -756,22 +781,27 @@ async fn adjust_ffmpeg_commands_based_on_start_times( println!("Video start: {:?}", video_start); // Convert the duration difference to a float representing seconds - let offset_seconds = duration_difference.as_secs() 
as f64 - + duration_difference.subsec_nanos() as f64 * 1e-9; + let offset_seconds = + (duration_difference.as_secs() as f64) + (duration_difference.subsec_nanos() as f64) * 1e-9; // Depending on which started first, adjust the relevant FFmpeg command if audio_start > video_start { // Offset the video start time - ffmpeg_video_command.splice(0..0, vec![ - "-itsoffset".to_string(), format!("{:.3}", offset_seconds) - ]); println!("Applying -itsoffset {:.3} to video", offset_seconds); + + Some(( + TimeOffsetTarget::Video, + vec!["-itsoffset".to_string(), format!("{:.3}", offset_seconds)], + )) } else if video_start > audio_start { // Offset the audio start time - ffmpeg_audio_command.splice(0..0, vec![ - "-itsoffset".to_string(), format!("{:.3}", offset_seconds) - ]); println!("Applying -itsoffset {:.3} to audio", offset_seconds); - } + Some(( + TimeOffsetTarget::Audio, + vec!["-itsoffset".to_string(), format!("{:.3}", offset_seconds)], + )) + } else { + None + } } diff --git a/apps/desktop/src-tauri/src/recording.rs b/apps/desktop/src-tauri/src/recording.rs index d7bc2492..488dcf08 100644 --- a/apps/desktop/src-tauri/src/recording.rs +++ b/apps/desktop/src-tauri/src/recording.rs @@ -1,28 +1,31 @@ -use std::path::{Path, PathBuf}; +use futures::future::join_all; +use serde::{Deserialize, Serialize}; use std::collections::HashSet; -use std::io::{self, BufReader, BufRead, ErrorKind}; use std::fs::File; -use std::sync::{Arc, atomic::{AtomicBool, Ordering}}; -use tokio::sync:: {Mutex}; -use tokio::task::JoinHandle; -use tokio::time::{Duration}; -use serde::{Serialize, Deserialize}; +use std::io::{self, BufRead, BufReader, ErrorKind}; +use std::path::{Path, PathBuf}; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; use tauri::State; -use futures::future::join_all; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; +use tokio::time::Duration; -use crate::upload::{upload_file}; +use crate::upload::{self, upload_file}; use crate::media::MediaRecorder; pub struct RecordingState { - pub media_process: Option, - pub recording_options: Option, - pub shutdown_flag: Arc, - pub video_uploading_finished: Arc, - pub audio_uploading_finished: Arc, - pub data_dir: Option, - pub max_screen_width: usize, - pub max_screen_height: usize, + pub media_process: Option, + pub recording_options: Option, + pub shutdown_flag: Arc, + pub video_uploading_finished: Arc, + pub audio_uploading_finished: Arc, + pub data_dir: Option, + pub max_screen_width: usize, + pub max_screen_height: usize, } unsafe impl Send for RecordingState {} @@ -32,85 +35,112 @@ unsafe impl Sync for MediaRecorder {} #[derive(Debug, Serialize, Deserialize, Clone, specta::Type)] pub struct RecordingOptions { - pub user_id: String, - pub video_id: String, - pub screen_index: String, - pub video_index: String, - pub audio_name: String, - pub aws_region: String, - pub aws_bucket: String, + pub user_id: String, + pub video_id: String, + pub screen_index: String, + pub video_index: String, + pub audio_name: String, + pub aws_region: String, + pub aws_bucket: String, } #[tauri::command] #[specta::specta] pub async fn start_dual_recording( - state: State<'_, Arc>>, - options: RecordingOptions, + state: State<'_, Arc>>, + options: RecordingOptions, ) -> Result<(), String> { - println!("Starting screen recording..."); - let mut state_guard = state.lock().await; + println!("Starting screen recording..."); + let mut state_guard = state.lock().await; - let shutdown_flag = Arc::new(AtomicBool::new(false)); + let shutdown_flag = 
Arc::new(AtomicBool::new(false)); - let data_dir = state_guard.data_dir.as_ref() - .ok_or("Data directory is not set in the recording state".to_string())?.clone(); + let data_dir = state_guard + .data_dir + .as_ref() + .ok_or("Data directory is not set in the recording state".to_string())? + .clone(); - println!("data_dir: {:?}", data_dir); + println!("data_dir: {:?}", data_dir); - let audio_chunks_dir = data_dir.join("chunks/audio"); - let video_chunks_dir = data_dir.join("chunks/video"); - let screenshot_dir = data_dir.join("screenshots"); + let screenshot_dir = data_dir.join("screenshots"); + let audio_chunks_dir = data_dir.join("chunks/audio"); + let video_chunks_dir = data_dir.join("chunks/video"); - clean_and_create_dir(&audio_chunks_dir)?; - clean_and_create_dir(&video_chunks_dir)?; - clean_and_create_dir(&screenshot_dir)?; + clean_and_create_dir(&screenshot_dir)?; + clean_and_create_dir(&audio_chunks_dir)?; + clean_and_create_dir(&video_chunks_dir)?; - let audio_name = if options.audio_name.is_empty() { - None - } else { - Some(options.audio_name.clone()) - }; - - let media_recording_preparation = prepare_media_recording(&options, &audio_chunks_dir, &video_chunks_dir, &screenshot_dir, audio_name, state_guard.max_screen_width, state_guard.max_screen_height); - let media_recording_result = media_recording_preparation.await.map_err(|e| e.to_string())?; - - state_guard.media_process = Some(media_recording_result); - state_guard.recording_options = Some(options.clone()); - state_guard.shutdown_flag = shutdown_flag.clone(); - state_guard.video_uploading_finished = Arc::new(AtomicBool::new(false)); - state_guard.audio_uploading_finished = Arc::new(AtomicBool::new(false)); - - let is_local_mode = match dotenv_codegen::dotenv!("NEXT_PUBLIC_LOCAL_MODE") { - "true" => true, - _ => false, - }; - - if !is_local_mode { - let screen_upload = start_upload_loop(video_chunks_dir.clone(), options.clone(), "video".to_string(), shutdown_flag.clone(), state_guard.video_uploading_finished.clone()); - let audio_upload = start_upload_loop(audio_chunks_dir, options.clone(), "audio".to_string(), shutdown_flag.clone(), state_guard.audio_uploading_finished.clone()); + let audio_name = if options.audio_name.is_empty() { + None + } else { + Some(options.audio_name.clone()) + }; - drop(state_guard); + let media_recording_preparation = prepare_media_recording( + &options, + &screenshot_dir, + &audio_chunks_dir, + &video_chunks_dir, + audio_name, + state_guard.max_screen_width, + state_guard.max_screen_height, + ); + let media_recording_result = media_recording_preparation + .await + .map_err(|e| e.to_string())?; + + state_guard.media_process = Some(media_recording_result); + state_guard.recording_options = Some(options.clone()); + state_guard.shutdown_flag = shutdown_flag.clone(); + state_guard.video_uploading_finished = Arc::new(AtomicBool::new(false)); + state_guard.audio_uploading_finished = Arc::new(AtomicBool::new(false)); - println!("Starting upload loops..."); + let is_local_mode = match dotenv_codegen::dotenv!("NEXT_PUBLIC_LOCAL_MODE") { + "true" => true, + _ => false, + }; - match tokio::try_join!(screen_upload, audio_upload) { - Ok(_) => { - println!("Both upload loops completed successfully."); - }, - Err(e) => { - eprintln!("An error occurred: {}", e); - }, - } - } else { - println!("Skipping upload loops due to NEXT_PUBLIC_LOCAL_MODE being set to 'true'."); - } + if !is_local_mode { + let video_upload = start_upload_loop( + video_chunks_dir.clone(), + options.clone(), + upload::FileType::Video, + 
shutdown_flag.clone(), + state_guard.video_uploading_finished.clone(), + ); + let audio_upload = start_upload_loop( + audio_chunks_dir, + options.clone(), + upload::FileType::Audio, + shutdown_flag.clone(), + state_guard.audio_uploading_finished.clone(), + ); + + drop(state_guard); + + println!("Starting upload loops..."); + + match tokio::try_join!(video_upload, audio_upload) { + Ok(_) => { + println!("Both upload loops completed successfully."); + } + Err(e) => { + eprintln!("An error occurred: {}", e); + } + } + } else { + println!("Skipping upload loops due to NEXT_PUBLIC_LOCAL_MODE being set to 'true'."); + } - Ok(()) + Ok(()) } #[tauri::command] #[specta::specta] -pub async fn stop_all_recordings(state: State<'_, Arc>>) -> Result<(), String> { +pub async fn stop_all_recordings( + state: State<'_, Arc>>, +) -> Result<(), String> { let mut guard = state.lock().await; println!("Stopping media recording..."); @@ -119,7 +149,10 @@ pub async fn stop_all_recordings(state: State<'_, Arc>>) - if let Some(mut media_process) = guard.media_process.take() { println!("Stopping media recording..."); - media_process.stop_media_recording().await.expect("Failed to stop media recording"); + media_process + .stop_media_recording() + .await + .expect("Failed to stop media recording"); } let is_local_mode = match dotenv_codegen::dotenv!("NEXT_PUBLIC_LOCAL_MODE") { @@ -129,7 +162,8 @@ pub async fn stop_all_recordings(state: State<'_, Arc>>) - if !is_local_mode { while !guard.video_uploading_finished.load(Ordering::SeqCst) - || !guard.audio_uploading_finished.load(Ordering::SeqCst) { + || !guard.audio_uploading_finished.load(Ordering::SeqCst) + { println!("Waiting for uploads to finish..."); tokio::time::sleep(Duration::from_millis(50)).await; } @@ -148,24 +182,24 @@ fn clean_and_create_dir(dir: &Path) -> Result<(), String> { std::fs::create_dir_all(dir).map_err(|e| e.to_string())?; if !dir.to_string_lossy().contains("screenshots") { - let segment_list_path = dir.join("segment_list.txt"); - match File::open(&segment_list_path) { - Ok(_) => Ok(()), - Err(ref e) if e.kind() == ErrorKind::NotFound => { - File::create(&segment_list_path).map_err(|e| e.to_string())?; - Ok(()) - }, - Err(e) => Err(e.to_string()), - } + let segment_list_path = dir.join("segment_list.txt"); + match File::open(&segment_list_path) { + Ok(_) => Ok(()), + Err(ref e) if e.kind() == ErrorKind::NotFound => { + File::create(&segment_list_path).map_err(|e| e.to_string())?; + Ok(()) + } + Err(e) => Err(e.to_string()), + } } else { - Ok(()) + Ok(()) } } async fn start_upload_loop( chunks_dir: PathBuf, options: RecordingOptions, - video_type: String, + file_type: upload::FileType, shutdown_flag: Arc, uploading_finished: Arc, ) -> Result<(), String> { @@ -191,12 +225,11 @@ async fn start_upload_loop( let segment_path = chunks_dir.join(segment_filename); if segment_path.is_file() { let options_clone = options.clone(); - let video_type_clone = video_type.clone(); - let segment_path_clone = segment_path.clone(); upload_tasks.push(tokio::spawn(async move { - let filepath_str = segment_path_clone.to_str().unwrap_or_default().to_owned(); - println!("Uploading video for {}: {}", video_type_clone, filepath_str); - upload_file(Some(options_clone), filepath_str, video_type_clone).await.map(|_| ()) + println!("Uploading video for {file_type}: {segment_path:?}"); + upload_file(Some(options_clone), segment_path, file_type) + .await + .map(|_| ()) })); } watched_segments.insert(segment_filename.clone()); @@ -228,18 +261,25 @@ fn 
load_segment_list(segment_list_path: &Path) -> io::Result> { } async fn prepare_media_recording( - options: &RecordingOptions, - audio_chunks_dir: &Path, - screenshot_dir: &Path, - video_chunks_dir: &Path, - audio_name: Option, - max_screen_width: usize, - max_screen_height: usize, + options: &RecordingOptions, + screenshot_dir: &Path, + audio_chunks_dir: &Path, + video_chunks_dir: &Path, + audio_name: Option, + max_screen_width: usize, + max_screen_height: usize, ) -> Result { - let mut media_recorder = MediaRecorder::new(); - let audio_file_path = audio_chunks_dir.to_str().unwrap(); - let video_file_path = video_chunks_dir.to_str().unwrap(); - let screenshot_dir_path = screenshot_dir.to_str().unwrap(); - media_recorder.start_media_recording(options.clone(), audio_file_path, screenshot_dir_path, video_file_path, audio_name.as_ref().map(String::as_str), max_screen_width, max_screen_height).await?; - Ok(media_recorder) + let mut media_recorder = MediaRecorder::new(); + media_recorder + .start_media_recording( + options.clone(), + screenshot_dir, + audio_chunks_dir, + video_chunks_dir, + audio_name.as_ref().map(String::as_str), + max_screen_width, + max_screen_height, + ) + .await?; + Ok(media_recorder) } diff --git a/apps/desktop/src-tauri/src/upload.rs b/apps/desktop/src-tauri/src/upload.rs index 55cf324d..89ed6beb 100644 --- a/apps/desktop/src-tauri/src/upload.rs +++ b/apps/desktop/src-tauri/src/upload.rs @@ -1,25 +1,40 @@ +use regex::Regex; use reqwest; -use std::fs::File; -use std::io::Read; -use std::path::Path; +use serde_json::Value as JsonValue; +use std::path::{Path, PathBuf}; use std::process::{Command, Output}; use std::str; -use std::fs; -use regex::Regex; -use serde_json::Value as JsonValue; use crate::recording::RecordingOptions; -use crate::utils::{ffmpeg_path_as_str}; +use crate::utils::ffmpeg_path_as_str; + +#[derive(Clone, Copy)] +pub enum FileType { + Video, + Audio, + Screenshot, +} + +impl std::fmt::Display for FileType { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + FileType::Video => write!(f, "video"), + FileType::Audio => write!(f, "audio"), + FileType::Screenshot => write!(f, "screenshot"), + } + } +} pub async fn upload_file( options: Option, - file_path: String, - file_type: String, + file_path: PathBuf, + file_type: FileType, ) -> Result { if let Some(ref options) = options { println!("Uploading video..."); - let duration = get_video_duration(&file_path).map_err(|e| format!("Failed to get video duration: {}", e))?; + let duration = get_video_duration(&file_path) + .map_err(|e| format!("Failed to get video duration: {}", e))?; let duration_str = duration.to_string(); let file_name = Path::new(&file_path) @@ -28,40 +43,45 @@ pub async fn upload_file( .ok_or("Invalid file path")? 
.to_string(); - let file_key = format!("{}/{}/{}/{}", options.user_id, options.video_id, file_type, file_name); + let file_key = format!( + "{}/{}/{file_type}/{file_name}", + options.user_id, options.video_id, + ); let server_url_base: &'static str = dotenv_codegen::dotenv!("NEXT_PUBLIC_URL"); let server_url = format!("{}/api/upload/signed", server_url_base); - - let body: serde_json::Value; - - if file_type == "video" { - let (codec_name, width, height, frame_rate, bit_rate) = log_video_info(&file_path).map_err(|e| format!("Failed to log video info: {}", e))?; - - body = serde_json::json!({ - "userId": options.user_id, - "fileKey": file_key, - "awsBucket": options.aws_bucket, - "awsRegion": options.aws_region, - "duration": duration_str, - "resolution": format!("{}x{}", width, height), - "framerate": frame_rate, - "bandwidth": bit_rate, - "videoCodec": codec_name, - }); - } else { - - body = serde_json::json!({ - "userId": options.user_id, - "fileKey": file_key, - "awsBucket": options.aws_bucket, - "awsRegion": options.aws_region, - "duration": duration_str, - }); - } + + let body = match file_type { + FileType::Video => { + let (codec_name, width, height, frame_rate, bit_rate) = log_video_info(&file_path) + .map_err(|e| format!("Failed to log video info: {}", e))?; + + serde_json::json!({ + "userId": options.user_id, + "fileKey": file_key, + "awsBucket": options.aws_bucket, + "awsRegion": options.aws_region, + "duration": duration_str, + "resolution": format!("{}x{}", width, height), + "framerate": frame_rate, + "bandwidth": bit_rate, + "videoCodec": codec_name, + }) + } + FileType::Audio | FileType::Screenshot => { + serde_json::json!({ + "userId": options.user_id, + "fileKey": file_key, + "awsBucket": options.aws_bucket, + "awsRegion": options.aws_region, + "duration": duration_str, + }) + } + }; let client = reqwest::Client::new(); - let server_response = client.post(server_url) + let server_response = client + .post(server_url) .json(&body) .send() .await @@ -72,36 +92,36 @@ pub async fn upload_file( println!("Server response: {}", server_response); - // Deserialize the server response let presigned_post_data: JsonValue = serde_json::from_str(&server_response) .map_err(|e| format!("Failed to deserialize server response: {}", e))?; // Construct the multipart form for the file upload - let fields = presigned_post_data["presignedPostData"]["fields"].as_object() + let fields = presigned_post_data["presignedPostData"]["fields"] + .as_object() .ok_or("Fields object is missing or not an object")?; - + let mut form = reqwest::multipart::Form::new(); - + for (key, value) in fields.iter() { - let value_str = value.as_str() + let value_str = value + .as_str() .ok_or(format!("Value for key '{}' is not a string", key))?; form = form.text(key.to_string(), value_str.to_owned()); } - println!("Uploading file: {}", file_path); - - let mime_type = if file_path.to_lowercase().ends_with(".aac") { - "audio/aac" - } else if file_path.to_lowercase().ends_with(".webm") { - "audio/webm" - } else if file_path.to_lowercase().ends_with(".mp3") { - "audio/mpeg" - } else { - "video/mp2t" + println!("Uploading file: {file_path:?}"); + + let mime_type = match file_path.extension() { + Some(ext) if ext == "aac" => "audio/aac", + Some(ext) if ext == "mp3" => "audio/mpeg", + Some(ext) if ext == "webm" => "audio/webm", + _ => "video/mp2t", }; - let file_bytes = tokio::fs::read(&file_path).await.map_err(|e| format!("Failed to read file: {}", e))?; + let file_bytes = tokio::fs::read(&file_path) + .await + .map_err(|e| 
format!("Failed to read file: {}", e))?; let file_part = reqwest::multipart::Part::bytes(file_bytes) .file_name(file_name.clone()) .mime_str(mime_type) @@ -109,15 +129,13 @@ pub async fn upload_file( form = form.part("file", file_part); - let post_url = presigned_post_data["presignedPostData"]["url"].as_str() + let post_url = presigned_post_data["presignedPostData"]["url"] + .as_str() .ok_or("URL is missing or not a string")?; println!("Uploading file to: {}", post_url); - let response = client.post(post_url) - .multipart(form) - .send() - .await; + let response = client.post(post_url).multipart(form).send().await; match response { Ok(response) if response.status().is_success() => { @@ -125,16 +143,25 @@ pub async fn upload_file( } Ok(response) => { let status = response.status(); - let error_body = response.text().await.unwrap_or_else(|_| "".to_string()); - eprintln!("Failed to upload file. Status: {}. Body: {}", status, error_body); - return Err(format!("Failed to upload file. Status: {}. Body: {}", status, error_body)); + let error_body = response + .text() + .await + .unwrap_or_else(|_| "".to_string()); + eprintln!( + "Failed to upload file. Status: {}. Body: {}", + status, error_body + ); + return Err(format!( + "Failed to upload file. Status: {}. Body: {}", + status, error_body + )); } Err(e) => { return Err(format!("Failed to send upload file request: {}", e)); } } - println!("Removing file after upload: {}", file_path); + println!("Removing file after upload: {file_path:?}"); let remove_result = tokio::fs::remove_file(&file_path).await; match &remove_result { Ok(_) => println!("File removed successfully"), @@ -148,7 +175,7 @@ pub async fn upload_file( } } -pub fn get_video_duration(file_path: &str) -> Result { +pub fn get_video_duration(file_path: &Path) -> Result { let ffmpeg_binary_path_str = ffmpeg_path_as_str().unwrap().to_owned(); let output = Command::new(ffmpeg_binary_path_str) @@ -169,7 +196,7 @@ pub fn get_video_duration(file_path: &str) -> Result { Ok(duration) } -fn log_video_info(file_path: &str) -> Result<(String, String, String, String, String), String> { +fn log_video_info(file_path: &Path) -> Result<(String, String, String, String, String), String> { let output: Output = Command::new("ffprobe") .arg("-v") .arg("error") @@ -193,13 +220,14 @@ fn log_video_info(file_path: &str) -> Result<(String, String, String, String, St let codec_name = info_parts[0].to_string(); let width: String = info_parts[1].to_string(); let height: String = info_parts[2].to_string(); - + // Parse frame rate as a fraction and convert to float let frame_rate_parts: Vec<&str> = info_parts[3].split('/').collect(); - let frame_rate: f64 = frame_rate_parts[0].parse::().unwrap() / frame_rate_parts[1].parse::().unwrap(); + let frame_rate: f64 = + frame_rate_parts[0].parse::().unwrap() / frame_rate_parts[1].parse::().unwrap(); let frame_rate: String = frame_rate.to_string(); - + let bit_rate: String = info_parts[4].to_string(); Ok((codec_name, width, height, frame_rate, bit_rate)) -} \ No newline at end of file +} diff --git a/apps/desktop/src-tauri/src/utils.rs b/apps/desktop/src-tauri/src/utils.rs index 34b4213f..4413b6ca 100644 --- a/apps/desktop/src-tauri/src/utils.rs +++ b/apps/desktop/src-tauri/src/utils.rs @@ -104,14 +104,9 @@ pub fn ffmpeg_path_as_str() -> Result { } } -pub fn create_named_pipe(path: &str) -> Result<(), nix::Error> { +pub fn create_named_pipe(path: &Path) -> Result<(), nix::Error> { use nix::sys::stat; use nix::unistd; unistd::mkfifo(path, stat::Mode::S_IRWXU)?; Ok(()) } - -pub 
fn remove_named_pipe(path: &str) -> Result<(), std::io::Error> {
-    std::fs::remove_file(path)?;
-    Ok(())
-}
diff --git a/apps/web/app/api/desktop/video/create/route.ts b/apps/web/app/api/desktop/video/create/route.ts
index c7bd9ac4..fa65d979 100644
--- a/apps/web/app/api/desktop/video/create/route.ts
+++ b/apps/web/app/api/desktop/video/create/route.ts
@@ -4,6 +4,7 @@ import { videos } from "@cap/database/schema";
 import { getCurrentUser } from "@cap/database/auth/session";
 import { nanoId } from "@cap/database/helpers";
 import { cookies } from "next/headers";
+import { dub } from "@/utils/dub";
 
 const allowedOrigins = [
   process.env.NEXT_PUBLIC_URL,
@@ -91,23 +92,11 @@ export async function GET(req: NextRequest) {
     process.env.NEXT_PUBLIC_IS_CAP &&
     process.env.NEXT_PUBLIC_ENVIRONMENT === "production"
   ) {
-    const dubOptions = {
-      method: "POST",
-      headers: {
-        Authorization: `Bearer ${process.env.DUB_API_KEY}`,
-        "Content-Type": "application/json",
-      },
-      body: JSON.stringify({
-        url: process.env.NEXT_PUBLIC_URL + "/s/" + id,
-        key: id,
-        domain: "cap.link",
-      }),
-    };
-
-    await fetch("https://api.dub.co/links?projectSlug=cap", dubOptions)
-      .then((response) => response.json())
-      .then((response) => console.log(response))
-      .catch((err) => console.error(err));
+    await dub.links.create({
+      url: process.env.NEXT_PUBLIC_URL + "/s/" + id,
+      domain: "cap.link",
+      key: id,
+    });
   }
 
   return new Response(
diff --git a/apps/web/app/api/video/analytics/route.ts b/apps/web/app/api/video/analytics/route.ts
index 699832e9..6a7f2922 100644
--- a/apps/web/app/api/video/analytics/route.ts
+++ b/apps/web/app/api/video/analytics/route.ts
@@ -1,3 +1,5 @@
+import { dub } from "@/utils/dub";
+import { ClicksCount } from "dub/models/components";
 import { NextRequest } from "next/server";
 
 export const revalidate = 300;
@@ -15,32 +17,49 @@ export async function GET(request: NextRequest) {
     });
   }
 
-  const dubOptions = {
-    method: "GET",
-    headers: {
-      Authorization: `Bearer ${process.env.DUB_API_KEY}`,
-      "Content-Type": "application/json",
-    },
-  };
+  try {
+    const response = await dub.analytics.retrieve({
+      domain: "cap.link",
+      key: videoId,
+    });
+    const { clicks: analytics } = response as ClicksCount;
 
-  const analytics = await fetch(
-    `https://api.dub.co/analytics/clicks?projectSlug=cap&domain=cap.link&key=${videoId}`,
-    dubOptions
-  ).then((response) => response.json());
+    if (typeof analytics !== "number" || analytics === null) {
+      return new Response(JSON.stringify({ error: true }), {
+        status: 401,
+        headers: {
+          "Content-Type": "application/json",
+        },
+      });
+    }
 
-  if (typeof analytics !== "number") {
+    return new Response(JSON.stringify({ count: analytics }), {
+      status: 200,
+      headers: {
+        "Content-Type": "application/json",
+      },
+    });
+  } catch (error: any) {
+    if (error.code === "not_found") {
+      return new Response(
+        JSON.stringify({
+          error: true,
+          message: "Video link not found.",
+          docUrl: error.docUrl,
+        }),
+        {
+          status: 404,
+          headers: {
+            "Content-Type": "application/json",
+          },
+        }
+      );
+    }
     return new Response(JSON.stringify({ error: true }), {
-      status: 401,
+      status: 500,
       headers: {
         "Content-Type": "application/json",
       },
     });
   }
-
-  return new Response(JSON.stringify({ count: analytics }), {
-    status: 200,
-    headers: {
-      "Content-Type": "application/json",
-    },
-  });
 }
diff --git a/apps/web/app/dashboard/_components/DynamicSharedLayout.tsx b/apps/web/app/dashboard/_components/DynamicSharedLayout.tsx
index 3d25ae09..8b46896f 100644
--- a/apps/web/app/dashboard/_components/DynamicSharedLayout.tsx
+++ 
diff --git a/apps/web/app/dashboard/_components/DynamicSharedLayout.tsx b/apps/web/app/dashboard/_components/DynamicSharedLayout.tsx
index 3d25ae09..8b46896f 100644
--- a/apps/web/app/dashboard/_components/DynamicSharedLayout.tsx
+++ b/apps/web/app/dashboard/_components/DynamicSharedLayout.tsx
@@ -6,15 +6,14 @@ import {
   DropdownMenu,
   DropdownMenuContent,
   DropdownMenuItem,
-  DropdownMenuLabel,
   DropdownMenuSeparator,
   DropdownMenuTrigger,
-  Button,
 } from "@cap/ui";
 import { UsageButton } from "@/components/UsageButton";
 import { users, spaces } from "@cap/database/schema";
 import Link from "next/link";
 import { isUserOnProPlan } from "@cap/utils";
+import { signOut } from "next-auth/react";
 
 type SharedContext = {
   spaceData: (typeof spaces.$inferSelect)[] | null;
@@ -119,9 +118,9 @@
   [JSX for this hunk is not preserved in this excerpt; it swaps the old menu markup for markup that presumably calls the newly imported signOut()]
diff --git a/apps/web/app/dashboard/caps/Caps.tsx b/apps/web/app/dashboard/caps/Caps.tsx
index 9c4a68c7..4eea4e7d 100644
--- a/apps/web/app/dashboard/caps/Caps.tsx
+++ b/apps/web/app/dashboard/caps/Caps.tsx
@@ -26,6 +26,7 @@ import {
 } from "@cap/ui";
 import { debounce } from "lodash";
 import { playlistToMp4 } from "@/utils/video/ffmpeg/helpers";
+import { Tooltip } from "react-tooltip";
 
 type videoData = {
   id: string;
@@ -144,7 +145,7 @@ export const Caps = ({ data, count }: { data: videoData; count: number }) => {
         }
       )
       .finally(() => {
-        setIsDownloading(null); // Reset downloading state after completion or failure
+        setIsDownloading(null);
       });
   };
 
@@ -211,7 +212,7 @@ export const Caps = ({ data, count }: { data: videoData; count: number }) => {
   return (
   [The JSX in this hunk and the ones that follow it is only partially preserved in this excerpt. The surviving fragments show the cap card markup being rewrapped, presumably with the newly imported react-tooltip, around {titles[cap.id] || cap.name}, {moment(cap.createdAt).fromNow()}, {videoAnalytics ?? "-"}, {cap.totalComments}, and {cap.totalReactions}.]
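Because the markup above is only partially shown, here is an illustrative sketch, not the actual Caps.tsx code, of how react-tooltip v5 (the library imported in this file) is typically attached to a badge like these counts:

// Hypothetical component; the AnalyticsBadge name, ids, and tooltip text are assumptions.
import { Tooltip } from "react-tooltip";

export const AnalyticsBadge = ({ count }: { count: number | string }) => (
  <>
    {/* data-tooltip-id ties this element to the <Tooltip> rendered below */}
    <span data-tooltip-id="cap-analytics-tooltip" data-tooltip-content="Total views">
      {count}
    </span>
    <Tooltip id="cap-analytics-tooltip" />
  </>
);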
diff --git a/apps/web/app/login/form.tsx b/apps/web/app/login/form.tsx
index 90e8c8b7..c78c551a 100644
--- a/apps/web/app/login/form.tsx
+++ b/apps/web/app/login/form.tsx
@@ -11,6 +11,7 @@ export function LoginForm() {
   const next = searchParams?.get("next");
   const [email, setEmail] = useState("");
   const [loading, setLoading] = useState(false);
+  const [emailSent, setEmailSent] = useState(false);
 
   useEffect(() => {
     const error = searchParams?.get("error");
@@ -31,21 +32,17 @@
       ...(next && next.length > 0 ? { callbackUrl: next } : {}),
     })
       .then((res) => {
-        console.log("res");
-        console.log(res);
         setLoading(false);
         if (res?.ok && !res?.error) {
           setEmail("");
-          toast.success("Email sent - check your inbox!", {
-            duration: 20000,
-          });
+          setEmailSent(true);
+          toast.success("Email sent - check your inbox!");
         } else {
           toast.error("Error sending email - try again?");
         }
       })
       .catch((err) => {
-        console.log("err");
-        console.log(err);
+        setEmailSent(false);
         setLoading(false);
         toast.error("Error sending email - try again?");
       });
@@ -58,10 +55,11 @@
         name="email"
         autoFocus
         type="email"
-        placeholder="tim@apple.com"
+        placeholder={emailSent ? "" : "tim@apple.com"}
         autoComplete="email"
         required
         value={email}
+        disabled={emailSent}
         onChange={(e) => {
           setEmail(e.target.value);
         }}
@@ -73,9 +71,9 @@
         size="lg"
         className="h-12 text-lg"
         type="submit"
-        disabled={loading}
+        disabled={loading || emailSent}
       >
-        Continue with Email
+        {emailSent ? "Email was sent to your inbox" : "Continue with Email"}

         By typing your email and clicking continue, you acknowledge that you
@@ -98,6 +96,20 @@
           .
 
+      {emailSent && (
+        [added JSX not preserved in this excerpt]
+      )}
   );
 }
diff --git a/apps/web/app/s/[videoId]/_components/ShareHeader.tsx b/apps/web/app/s/[videoId]/_components/ShareHeader.tsx
index 267c24dc..d5361b03 100644
--- a/apps/web/app/s/[videoId]/_components/ShareHeader.tsx
+++ b/apps/web/app/s/[videoId]/_components/ShareHeader.tsx
@@ -6,7 +6,6 @@ import { useRouter } from "next/navigation";
 import { useState } from "react";
 import { toast } from "react-hot-toast";
 import { LinkIcon } from "lucide-react";
-import { Tooltip } from "react-tooltip";
 
 export const ShareHeader = ({
   data,
@@ -53,7 +52,6 @@
   return (
     <>
-      [removed JSX not preserved in this excerpt; presumably the react-tooltip element whose import is removed above]
diff --git a/apps/web/app/s/[videoId]/_components/ShareVideo.tsx b/apps/web/app/s/[videoId]/_components/ShareVideo.tsx
index bfcf4e18..756b9486 100644
--- a/apps/web/app/s/[videoId]/_components/ShareVideo.tsx
+++ b/apps/web/app/s/[videoId]/_components/ShareVideo.tsx
@@ -11,7 +11,6 @@
 } from "lucide-react";
 import { LogoSpinner } from "@cap/ui";
 import { userSelectProps } from "@cap/database/auth/session";
-import { Tooltip } from "react-tooltip";
 import { fromVtt, Subtitle } from "subtitles-parser-vtt";
 import { is } from "drizzle-orm";
 import toast from "react-hot-toast";
@@ -410,7 +409,6 @@
           : "User"
         }`}
       >
-        [removed JSX not preserved in this excerpt; presumably another react-tooltip element]
       {comment.type === "text" ? (

[The hunks between ShareVideo.tsx and the lockfile are not preserved in this excerpt. The surviving fragments below are from the lockfile diff (apparently pnpm-lock.yaml): the tail of a newly added package entry, whose name and resolution lines are missing and whose peer range ends in "= 3'", inserted just before the existing /eastasianwidth@0.2.0 entry, plus an updated eslint-module-utils entry.]

+    dependencies:
+      zod: 3.22.4
+    dev: false
+
   /eastasianwidth@0.2.0:
     resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
@@ -10007,7 +10018,7 @@ packages:
       doctrine: 2.1.0
       eslint: 8.57.0
       eslint-import-resolver-node: 0.3.9
-      eslint-module-utils: 2.8.0(@typescript-eslint/parser@5.62.0)(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0)
+      eslint-module-utils: 2.8.0(@typescript-eslint/parser@7.13.1)(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0)
       hasown: 2.0.0
       is-core-module: 2.13.1
       is-glob: 4.0.3