diff --git a/src/main.rs b/src/main.rs
index 287cd6f..3e728bb 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -2,7 +2,7 @@
 use std::io::Read;
 use std::path::{Path,PathBuf};
 use clap::{Args,Parser,Subcommand};
 use anyhow::{anyhow,Result as AResult};
-use futures::StreamExt;
+use futures::{StreamExt,TryStreamExt};
 use rbx_asset::cloud::{ApiKey,Context as CloudContext};
 use rbx_asset::cookie::{Cookie,Context as CookieContext,AssetVersion,CreationsItem};
@@ -10,6 +10,7 @@
 type AssetID=u64;
 type AssetIDFileMap=Vec<(AssetID,PathBuf)>;
 const CONCURRENT_DECODE:usize=8;
 const CONCURRENT_REQUESTS:usize=32;
+const CONCURRENT_FS:usize=64;
 #[derive(Parser)]
 #[command(author,version,about,long_about=None)]
@@ -30,6 +31,7 @@ enum Commands{
 	DownloadVersionV2(DownloadVersionSubcommand),
 	DownloadDecompile(DownloadDecompileSubcommand),
 	DownloadCreationsJson(DownloadCreationsJsonSubcommand),
+	DownloadCreationsHistory(DownloadCreationsHistorySubcommand),
 	DownloadUserInventoryJson(DownloadUserInventoryJsonSubcommand),
 	CreateAsset(CreateAssetSubcommand),
 	CreateAssetMedia(CreateAssetMediaSubcommand),
@@ -605,6 +607,7 @@ async fn main()->AResult<()>{
 			subcommand.output_folder.unwrap_or_else(||std::env::current_dir().unwrap()),
 			subcommand.continue_from_cursor.unwrap_or(false),
 		).await,
+		Commands::DownloadCreationsHistory(subcommand)=>subcommand.run().await,
 		Commands::DownloadUserInventoryJson(subcommand)=>download_user_inventory_json(
 			cookie_from_args(
 				subcommand.cookie_literal,
@@ -1300,6 +1303,112 @@ async fn download_user_inventory_json(cookie:Cookie,user_id:u64,output_folder:Pa
 	Ok(())
 }
+
+/// Download all versions of all assets created by a group or user. The output is written to a folder structure in the output directory.
+#[derive(Args)]
+struct DownloadCreationsHistorySubcommand{
+	#[arg(long,group="cookie",required=true)]
+	cookie_literal:Option<String>,
+	#[arg(long,group="cookie",required=true)]
+	cookie_envvar:Option<String>,
+	#[arg(long,group="cookie",required=true)]
+	cookie_file:Option<PathBuf>,
+	#[arg(long)]
+	output_folder:Option<PathBuf>,
+	#[arg(long,group="owner",required=true)]
+	group_id:Option<u64>,
+	#[arg(long,group="owner",required=true)]
+	user_id:Option<u64>,
+	#[arg(long)]
+	r#continue:Option<bool>,
+}
+impl DownloadCreationsHistorySubcommand{
+	async fn run(self)->AResult<()>{
+		download_creations_history(
+			cookie_from_args(
+				self.cookie_literal,
+				self.cookie_envvar,
+				self.cookie_file,
+			).await?,
+			owner_from_args(
+				self.user_id,
+				self.group_id,
+			)?,
+			self.output_folder.unwrap_or_else(||std::env::current_dir().unwrap()),
+			self.r#continue.unwrap_or(false),
+		).await
+	}
+}
+async fn download_creations_history(cookie:Cookie,owner:rbx_asset::cookie::Owner,output_folder:PathBuf,r#continue:bool)->AResult<()>{
+
+	let context=CookieContext::new(cookie);
+
+	// get list of all assets in inventory
+	let asset_list=download_creations_pages_from_checkpoint(&context,owner,output_folder.as_path(),r#continue).await?;
+
+	// create folder directories
+	let asset_folders:Vec<PathBuf>={
+		futures::stream::iter(asset_list.iter().map(|asset|async{
+			// create asset folder
+			let mut asset_folder=output_folder.clone();
+			asset_folder.push(asset.id.to_string());
+			tokio::fs::create_dir_all(asset_folder.as_path()).await?;
+			Ok::<_,anyhow::Error>(asset_folder)
+		}))
+		.buffered(CONCURRENT_FS)
+		.try_collect().await?
+	};
+
+	struct Job<'a>{
+		path:&'a PathBuf,
+		asset_id:u64,
+		asset_version:u64,
+	}
+	let mut job_list=Vec::new();
+
+	// create flattened futures stream to parallel download all asset versions
+	for (path,asset) in asset_folders.iter().zip(asset_list){
+		println!("Downloading history for {} - {}",asset.id,asset.name);
+
+		let version_history=get_version_history(&context,asset.id).await?;
+
+		println!("Found {} versions",version_history.len());
+		// save versions file
+		let mut versions_path=path.to_owned();
+		versions_path.push("versions.json");
+		tokio::fs::write(versions_path,serde_json::to_string(&version_history)?).await?;
+
+		job_list.extend(version_history.into_iter().map(|asset_version|
+			Job{
+				path,
+				asset_id:asset.id,
+				asset_version:asset_version.assetVersionNumber,
+			}
+		));
+	}
+
+	println!("Completed jobs list. Number of jobs: {}",job_list.len());
+
+	futures::stream::iter(job_list).map(async|job|{
+		let downloaded=context.get_asset(rbx_asset::cookie::GetAssetRequest{
+			asset_id:job.asset_id,
+			version:Some(job.asset_version),
+		}).await?;
+		Ok((job,downloaded))
+	})
+	.buffer_unordered(CONCURRENT_REQUESTS)
+	.try_for_each(async|(job,downloaded)|{
+		let mut dest=job.path.to_owned();
+		dest.push(format!("{}_v{}.rbxl",job.asset_id,job.asset_version));
+		tokio::fs::write(dest,downloaded.to_vec()?).await?;
+		Ok::<_,anyhow::Error>(())
+	}).await?;
+
+	println!("All jobs complete.");
+
+	Ok(())
+}
+
 async fn get_version_history(context:&CookieContext,asset_id:AssetID)->AResult<Vec<AssetVersion>>{
 	let mut page_request=rbx_asset::cookie::AssetVersionsPageRequest{
 		asset_id,