15 Commits

SHA1 Message Date
7ba16464c4 handle file variations correctly 2025-08-25 19:47:44 -07:00
66230d031c do not redownload 2025-08-25 19:47:44 -07:00
f6aa44ffc5 return list verbatim if no cursor 2025-08-25 19:47:44 -07:00
ae166d8509 do not error on remove 2025-08-25 19:47:44 -07:00
a4ae552169 fix cursor bug 2025-08-25 19:47:44 -07:00
23d687e072 explicit error path 2025-08-25 19:47:44 -07:00
71bbfa0128 fix stack overflow 2025-08-25 19:47:44 -07:00
89da9108c2 allow the versions to not exist 2025-08-25 19:47:44 -07:00
04d5592aaf delete cursor file if completed 2025-08-25 19:47:44 -07:00
bd3605ab87 allow the cursor to not exist 2025-08-25 19:47:44 -07:00
13cff42bbc fix error path 2025-08-25 19:47:44 -07:00
60ba5511ad plumb api key through DownloadCreationsHistory 2025-08-25 17:54:52 -07:00
cf67ad510b allow resume from files 2025-08-25 17:42:04 -07:00
e6a548a1a1 get_asset_v2 2025-08-25 17:42:04 -07:00
d2bee93fbb DownloadCreationsHistory 2025-08-25 17:42:04 -07:00


@@ -2,7 +2,7 @@ use std::io::Read;
 use std::path::{Path,PathBuf};
 use clap::{Args,Parser,Subcommand};
 use anyhow::{anyhow,Result as AResult};
-use futures::StreamExt;
+use futures::{StreamExt,TryStreamExt};
 use rbx_asset::cloud::{ApiKey,Context as CloudContext};
 use rbx_asset::cookie::{Cookie,Context as CookieContext,AssetVersion,CreationsItem};
@@ -10,6 +10,7 @@ type AssetID=u64;
 type AssetIDFileMap=Vec<(AssetID,PathBuf)>;
 const CONCURRENT_DECODE:usize=8;
 const CONCURRENT_REQUESTS:usize=32;
+const CONCURRENT_FS:usize=64;
 #[derive(Parser)]
 #[command(author,version,about,long_about=None)]
@@ -30,6 +31,7 @@ enum Commands{
 DownloadVersionV2(DownloadVersionSubcommand),
 DownloadDecompile(DownloadDecompileSubcommand),
 DownloadCreationsJson(DownloadCreationsJsonSubcommand),
+DownloadCreationsHistory(DownloadCreationsHistorySubcommand),
 DownloadUserInventoryJson(DownloadUserInventoryJsonSubcommand),
 CreateAsset(CreateAssetSubcommand),
 CreateAssetMedia(CreateAssetMediaSubcommand),
@@ -605,6 +607,7 @@ async fn main()->AResult<()>{
 subcommand.output_folder.unwrap_or_else(||std::env::current_dir().unwrap()),
 subcommand.continue_from_cursor.unwrap_or(false),
 ).await,
+Commands::DownloadCreationsHistory(subcommand)=>subcommand.run().await,
 Commands::DownloadUserInventoryJson(subcommand)=>download_user_inventory_json(
 cookie_from_args(
 subcommand.cookie_literal,
@@ -1166,10 +1169,10 @@ async fn get_creations_pages(
 loop{
 let mut page=context.get_creations_page(&config).await?;
 asset_list.append(&mut page.data);
-if page.nextPageCursor.is_none(){
+config.cursor=page.nextPageCursor;
+if config.cursor.is_none(){
 break;
 }
-config.cursor=page.nextPageCursor;
 }
 Ok(())
 }
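
The old loop tested page.nextPageCursor before storing it, so when the listing finished, config.cursor still held the previous page's cursor and a completed run was indistinguishable from an interrupted one. Assigning first makes config.cursor None exactly when every page has been consumed, which the checkpoint logic further down depends on. A minimal sketch of the assign-then-check pattern, with hypothetical Page/Config stand-ins for the rbx_asset types:

```rust
// Hypothetical stand-ins for rbx_asset's page/request types.
struct Page{data:Vec<u64>,next_page_cursor:Option<String>}
struct Config{cursor:Option<String>}

fn drain_pages(mut fetch:impl FnMut(&Config)->Page,config:&mut Config,out:&mut Vec<u64>){
	loop{
		let mut page=fetch(config);
		out.append(&mut page.data);
		// Store the cursor before testing it: after the loop exits,
		// config.cursor is None if and only if every page was consumed.
		config.cursor=page.next_page_cursor;
		if config.cursor.is_none(){
			break;
		}
	}
}
```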
@@ -1182,15 +1185,34 @@ async fn download_creations_pages_from_checkpoint(context:&CookieContext,owner:r
 let (mut asset_list,mut config)=if continue_from_cursor{
 // load state from files
-let (versions,cursor)=tokio::try_join!(
+let (versions,cursor)=tokio::join!(
 tokio::fs::read(versions_path.as_path()),
 tokio::fs::read_to_string(cursor_path.as_path()),
-)?;
+);
+// allow versions to not exist
+let (versions,cursor)=match (versions,cursor){
+// continue downloading
+(Ok(versions),Ok(cursor))=>(serde_json::from_slice(&versions)?,Some(cursor)),
+// already downloaded
+(Ok(versions),Err(e)) if matches!(e.kind(),std::io::ErrorKind::NotFound)=>return Ok(serde_json::from_slice(&versions)?),
+// not downloaded
+(Err(e),result) if matches!(e.kind(),std::io::ErrorKind::NotFound)=>{
+match result{
+Ok(_)=>{},
+Err(e) if matches!(e.kind(),std::io::ErrorKind::NotFound)=>{},
+Err(e)=>Err(e)?,
+}
+(Vec::new(),None)
+},
+// other errors
+(Ok(_),Err(e))=>Err(e)?,
+(Err(e),_)=>Err(e)?,
+};
 (
-serde_json::from_slice(&versions)?,
+versions,
 rbx_asset::cookie::CreationsPageRequest{
 owner,
-cursor:Some(cursor),
+cursor,
 }
 )
 }else{
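
The try_join! to join! change is deliberate: try_join! short-circuits on the first error, but here std::io::ErrorKind::NotFound is an expected state (fresh start, or an already-completed download), so both Results are collected and the four combinations are matched explicitly. A minimal sketch of the underlying missing-file-is-not-an-error pattern, assuming tokio; read_optional is a name invented here:

```rust
use std::io::ErrorKind;
use std::path::Path;

// Ok(None) means the file legitimately does not exist yet;
// every other I/O error still propagates.
async fn read_optional(path:&Path)->std::io::Result<Option<String>>{
	match tokio::fs::read_to_string(path).await{
		Ok(s)=>Ok(Some(s)),
		Err(e) if e.kind()==ErrorKind::NotFound=>Ok(None),
		Err(e)=>Err(e),
	}
}
```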
@@ -1204,16 +1226,21 @@ async fn download_creations_pages_from_checkpoint(context:&CookieContext,owner:r
 )
 };
-match get_creations_pages(&context,&mut asset_list,&mut config).await{
-Ok(())=>println!("Pages polling complete"),
-Err(e)=>println!("Error: {e}"),
-}
+get_creations_pages(&context,&mut asset_list,&mut config).await?;
 let cursor_fut=async{
 if let Some(cursor)=config.cursor{
 println!("writing cursor state...");
 // there was a problem, write out cursor
 tokio::fs::write(cursor_path,cursor).await?;
+}else{
+// no cursor
+if let Err(e)=tokio::fs::remove_file(cursor_path).await{
+match e.kind(){
+std::io::ErrorKind::NotFound=>println!("Cannot delete cursor: file not found"),
+_=>Err(e)?,
+}
+}
 }
 Ok(())
 };
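
With the pagination fix, the cursor file becomes a clean checkpoint: it is written whenever config.cursor is still Some and removed once it is None, and a NotFound during removal is tolerated so the cleanup is idempotent. The invariant in isolation, as a hedged sketch (sync_checkpoint is a hypothetical name):

```rust
use std::io::ErrorKind;
use std::path::Path;

// Write the checkpoint for an unfinished run, remove it on completion.
// A missing file during cleanup is fine: the checkpoint is simply gone.
async fn sync_checkpoint(path:&Path,cursor:Option<String>)->std::io::Result<()>{
	match cursor{
		Some(cursor)=>tokio::fs::write(path,cursor).await,
		None=>match tokio::fs::remove_file(path).await{
			Ok(())=>Ok(()),
			Err(e) if e.kind()==ErrorKind::NotFound=>Ok(()),
			Err(e)=>Err(e),
		},
	}
}
```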
@@ -1300,6 +1327,148 @@ async fn download_user_inventory_json(cookie:Cookie,user_id:u64,output_folder:Pa
 Ok(())
 }
+/// Download all versions of all assets created by a group or user. The output is written to a folder structure in the output directory.
+#[derive(Args)]
+struct DownloadCreationsHistorySubcommand{
+#[arg(long,group="cookie",required=true)]
+cookie_literal:Option<String>,
+#[arg(long,group="cookie",required=true)]
+cookie_envvar:Option<String>,
+#[arg(long,group="cookie",required=true)]
+cookie_file:Option<PathBuf>,
+#[arg(long,group="api_key",required=true)]
+api_key_literal:Option<String>,
+#[arg(long,group="api_key",required=true)]
+api_key_envvar:Option<String>,
+#[arg(long,group="api_key",required=true)]
+api_key_file:Option<PathBuf>,
+#[arg(long)]
+output_folder:Option<PathBuf>,
+#[arg(long,group="owner",required=true)]
+group_id:Option<u64>,
+#[arg(long,group="owner",required=true)]
+user_id:Option<u64>,
+#[arg(long)]
+r#continue:Option<bool>,
+}
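
Each credential trio shares a clap group with required=true on every member, so supplying exactly one cookie source, one api_key source, and one owner satisfies the parser. Clap 4 can also express this constraint once at the struct level; a hypothetical alternative sketch (CookieSource and Cli are invented names):

```rust
use clap::{Args,Parser};

// One mutually-exclusive, required group instead of per-arg attributes.
#[derive(Args)]
#[group(required=true,multiple=false)]
struct CookieSource{
	#[arg(long)]
	cookie_literal:Option<String>,
	#[arg(long)]
	cookie_envvar:Option<String>,
	#[arg(long)]
	cookie_file:Option<std::path::PathBuf>,
}

#[derive(Parser)]
struct Cli{
	#[command(flatten)]
	cookie:CookieSource,
}
```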
+impl DownloadCreationsHistorySubcommand{
+async fn run(self)->AResult<()>{
+download_creations_history(
+cookie_from_args(
+self.cookie_literal,
+self.cookie_envvar,
+self.cookie_file,
+).await?,
+api_key_from_args(
+self.api_key_literal,
+self.api_key_envvar,
+self.api_key_file,
+).await?,
+owner_from_args(
+self.user_id,
+self.group_id,
+)?,
+self.output_folder.unwrap_or_else(||std::env::current_dir().unwrap()),
+self.r#continue.unwrap_or(false),
+).await
+}
+}
+async fn download_creations_history(cookie:Cookie,api_key:ApiKey,owner:rbx_asset::cookie::Owner,output_folder:PathBuf,r#continue:bool)->AResult<()>{
+let cookie_context=CookieContext::new(cookie);
+let cloud_context=CloudContext::new(api_key);
+// get list of all assets in inventory
+let asset_list=download_creations_pages_from_checkpoint(&cookie_context,owner,output_folder.as_path(),r#continue).await?;
+// create folder directories
+let asset_folders:Vec<PathBuf> ={
+futures::stream::iter(asset_list.iter().map(|asset|async{
+// create asset folder
+let mut asset_folder=output_folder.clone();
+asset_folder.push(asset.id.to_string());
+tokio::fs::create_dir_all(asset_folder.as_path()).await?;
+Ok::<_,anyhow::Error>(asset_folder)
+}))
+.buffered(CONCURRENT_FS)
+.try_collect().await?
+};
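
buffered(CONCURRENT_FS) rather than buffer_unordered matters here: buffered yields results in input order, so asset_folders[i] still corresponds to asset_list[i] in the zip below. A small self-contained demonstration of that ordering guarantee (ordered_demo is an invented name):

```rust
use futures::{StreamExt,TryStreamExt};

// buffered() runs up to N futures at once but emits results in input
// order, so the collected Vec lines up index-for-index with the input;
// buffer_unordered() would not guarantee this.
async fn ordered_demo()->Result<Vec<u64>,std::io::Error>{
	futures::stream::iter((0..10u64).map(|i|async move{Ok::<u64,std::io::Error>(i*2)}))
		.buffered(4)
		.try_collect().await
}
```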
+#[expect(dead_code)]
+#[derive(Debug)]
+enum Error<'a>{
+NoLocations(Job<'a>),
+GetVersionLocationError(rbx_asset::cloud::GetError),
+GetError(rbx_asset::cloud::GetError),
+Io(std::io::Error),
+}
+#[derive(Clone,Copy,Debug)]
+struct Job<'a>{
+path:&'a PathBuf,
+asset_id:u64,
+asset_version:u64,
+}
+let mut job_list=Vec::new();
+// create flattened futures stream to parallel download all asset versions
+for (path,asset) in asset_folders.iter().zip(asset_list){
+// save versions file
+let mut versions_path=path.to_owned();
+versions_path.push("versions.json");
+let version_history=if r#continue{
+let file=tokio::fs::read(versions_path.as_path()).await?;
+serde_json::from_slice(&file)?
+}else{
+println!("Downloading history for {} - {}",asset.id,asset.name);
+let version_history=get_version_history(&cookie_context,asset.id).await?;
+println!("Found {} versions",version_history.len());
+tokio::fs::write(versions_path,serde_json::to_string(&version_history)?).await?;
+version_history
+};
+job_list.extend(version_history.into_iter().map(|asset_version|
+Job{
+path,
+asset_id:asset.id,
+asset_version:asset_version.assetVersionNumber,
+}
+));
+}
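
Flattening every (asset, version) pair into one job list before downloading lets a single buffer_unordered stage spread CONCURRENT_REQUESTS across all assets rather than throttling per asset. The shape of that transformation, reduced to plain types in a sketch:

```rust
struct Job{asset_id:u64,asset_version:u64}

// One job per (asset, version) pair: a flat list gives the download
// stage a single queue to draw from, maximizing concurrency.
fn flatten(assets:&[(u64,Vec<u64>)])->Vec<Job>{
	assets.iter().flat_map(|(id,versions)|{
		versions.iter().map(move|&v|Job{asset_id:*id,asset_version:v})
	}).collect()
}
```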
println!("Completed jobs list. Number of jobs: {}",job_list.len());
futures::stream::iter(job_list).map(async|job|{
let mut dest=job.path.to_owned();
dest.push(format!("{}_v{}.rbxl",job.asset_id,job.asset_version));
//if the file already exists, don't try downloading it again
if tokio::fs::try_exists(dest.as_path()).await.map_err(Error::Io)?{
return Ok(());
}
let location=cloud_context.get_asset_version_location(rbx_asset::cloud::GetAssetVersionRequest{
asset_id:job.asset_id,
version:job.asset_version,
}).await.map_err(Error::GetVersionLocationError)?;
let location=location.location.ok_or(Error::NoLocations(job))?;
let downloaded=cloud_context.get_asset(&location).await.map_err(Error::GetError)?;
tokio::fs::write(dest,downloaded.to_vec().map_err(Error::Io)?).await.map_err(Error::Io)?;
Ok(())
})
.buffer_unordered(CONCURRENT_REQUESTS)
.for_each(async|result|{
match result{
Ok(())=>{},
Err(Error::NoLocations(job))=>println!("Job failed due to no locations: asset_id={} version={}",job.asset_id,job.asset_version),
Err(e)=>println!("Error: {e:?}"),
}
}).await;
println!("All jobs complete.");
Ok(())
}
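
Unlike the ordered folder-creation stage, the download stage uses buffer_unordered with a per-job Error consumed by for_each, so jobs finish in whatever order the network allows and a failed version is logged without cancelling the rest of the batch. The control-flow skeleton under those assumptions, with the API calls stubbed out (run_jobs is an invented name):

```rust
use futures::StreamExt;

// Each job yields a Result; for_each logs failures instead of
// short-circuiting, so one bad download never aborts the remaining jobs.
async fn run_jobs(jobs:Vec<u64>){
	futures::stream::iter(jobs).map(|job|async move{
		if job%7==0{return Err(job);}
		Ok(())
	})
	.buffer_unordered(32)
	.for_each(|result|async move{
		if let Err(job)=result{
			println!("job {job} failed");
		}
	}).await;
}
```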
 async fn get_version_history(context:&CookieContext,asset_id:AssetID)->AResult<Vec<AssetVersion>>{
 let mut page_request=rbx_asset::cookie::AssetVersionsPageRequest{
 asset_id,