rsc: Reenable minified version of trace logging (#1552)

* rsc: Reenable minified version of trace logging

* cleanup and shorten log

* address comments
V-FEXrt authored May 3, 2024
1 parent eb391bc commit 4173d52
Showing 4 changed files with 20 additions and 13 deletions.
4 changes: 3 additions & 1 deletion rust/rsc/src/rsc/add_job.rs
@@ -10,13 +10,15 @@ use tracing;
 #[path = "../common/database.rs"]
 mod database;
 
-#[tracing::instrument]
+#[tracing::instrument(skip_all)]
 pub async fn add_job(
     Json(payload): Json<AddJobPayload>,
     conn: Arc<DatabaseConnection>,
 ) -> StatusCode {
     // First construct all the job details as an ActiveModel for insert
     let hash = payload.hash();
+    tracing::info!(hash);
+
     let vis = payload.visible_files;
     let output_files = payload.output_files;
     let output_symlinks = payload.output_symlinks;
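For readers unfamiliar with the attribute: `#[tracing::instrument]` creates a span per call and, by default, records every argument via its `Debug` impl, which is what made the logs so heavy; `skip_all` keeps the span but drops the automatic argument capture, and the handler then records just the short hash as a field. A minimal sketch of the pattern — `JobPayload` is a hypothetical stand-in for `AddJobPayload`:

use tracing::{info, instrument};

// Hypothetical stand-in for AddJobPayload.
#[derive(Debug)]
struct JobPayload {
    files: Vec<String>, // potentially thousands of entries
}

impl JobPayload {
    // Stand-in hash; the real payload computes a content hash.
    fn hash(&self) -> String {
        format!("{:016x}", self.files.len())
    }
}

// skip_all: a span named `add_job` is still created for every call,
// but `payload` is never Debug-formatted into the logs.
#[instrument(skip_all)]
async fn add_job(payload: JobPayload) {
    let hash = payload.hash();
    info!(hash); // records only the short `hash` field
}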
4 changes: 2 additions & 2 deletions rust/rsc/src/rsc/blob.rs
@@ -24,13 +24,13 @@ pub trait BlobStore {
 
 pub trait DebugBlobStore: BlobStore + std::fmt::Debug {}
 
-#[tracing::instrument]
+#[tracing::instrument(skip_all)]
 pub async fn get_upload_url(server_addr: String) -> Json<GetUploadUrlResponse> {
     let url = server_addr + "/blob";
     Json(GetUploadUrlResponse { url })
 }
 
-#[tracing::instrument]
+#[tracing::instrument(skip_all)]
 pub async fn create_blob(
     mut multipart: Multipart,
     db: Arc<DatabaseConnection>,
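The same change in blob.rs has a second benefit worth noting: `#[tracing::instrument]` can only auto-record arguments whose types implement `fmt::Debug`, so skipping arguments also frees handlers to take non-`Debug` types such as a multipart stream. A small illustration with a hypothetical stream type:

use tracing::instrument;

// A hypothetical stream type that deliberately lacks #[derive(Debug)].
struct UploadStream;

// With plain #[instrument] this would fail to compile, because the macro
// tries to record `stream` via Debug. skip_all removes that requirement.
#[instrument(skip_all)]
async fn create_blob(stream: UploadStream) {
    let _ = stream; // consume the stream...
}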
14 changes: 7 additions & 7 deletions rust/rsc/src/rsc/main.rs
@@ -14,8 +14,8 @@ use std::time::Duration;
 use tracing;
 
 use sea_orm::{
-    prelude::Uuid, ActiveModelTrait, ActiveValue::*, ColumnTrait, ConnectionTrait, Database,
-    DatabaseConnection, EntityTrait, QueryFilter,
+    prelude::Uuid, ActiveModelTrait, ActiveValue::*, ColumnTrait, ConnectOptions, ConnectionTrait,
+    Database, DatabaseConnection, EntityTrait, QueryFilter,
 };
 
 use chrono::Utc;
@@ -230,7 +230,9 @@ async fn create_standalone_db() -> Result<DatabaseConnection, sea_orm::DbErr> {
 async fn create_remote_db(
     config: &config::RSCConfig,
 ) -> Result<DatabaseConnection, Box<dyn std::error::Error>> {
-    let connection = Database::connect(&config.database_url).await?;
+    let mut opt = ConnectOptions::new(&config.database_url);
+    opt.sqlx_logging_level(tracing::log::LevelFilter::Debug);
+    let connection = Database::connect(opt).await?;
     let pending_migrations = Migrator::get_pending_migrations(&connection).await?;
     if pending_migrations.len() != 0 {
         let err = Error::new(
@@ -368,10 +370,8 @@ fn launch_blob_eviction(
 #[tokio::main]
 async fn main() -> Result<(), Box<dyn std::error::Error>> {
     // setup a subscriber for logging
-    // TODO: The logging is incredibly spammy right now and causes significant slow down.
-    // for now, logging is disabled but this should be turned back on once logging is pruned.
-    // let subscriber = tracing_subscriber::FmtSubscriber::new();
-    // tracing::subscriber::set_global_default(subscriber)?;
+    let subscriber = tracing_subscriber::FmtSubscriber::new();
+    tracing::subscriber::set_global_default(subscriber)?;
 
     // Parse the arguments
     let args = ServerOptions::parse();
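Taken together, the main.rs changes re-enable the previously commented-out subscriber while muting its noisiest source: `FmtSubscriber::new()` logs at `INFO` and above by default, and sea-orm's per-statement sqlx logging, normally emitted at `INFO`, is demoted to `DEBUG` so the default subscriber filters it out. A standalone sketch of the combined setup, with a placeholder database URL:

use sea_orm::{ConnectOptions, Database, DatabaseConnection};

async fn init() -> Result<DatabaseConnection, Box<dyn std::error::Error>> {
    // Formatted events go to stdout; the default max level is INFO.
    let subscriber = tracing_subscriber::FmtSubscriber::new();
    tracing::subscriber::set_global_default(subscriber)?;

    // sea-orm logs every SQL statement through sqlx; at DEBUG those
    // statements no longer reach the INFO-level subscriber set above.
    let mut opt = ConnectOptions::new("postgres://localhost/rsc"); // placeholder URL
    opt.sqlx_logging_level(tracing::log::LevelFilter::Debug);
    Ok(Database::connect(opt).await?)
}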
11 changes: 8 additions & 3 deletions rust/rsc/src/rsc/read_job.rs
@@ -12,7 +12,7 @@ use std::collections::HashMap;
 use std::sync::Arc;
 use tracing;
 
-#[tracing::instrument]
+#[tracing::instrument(skip(conn))]
 async fn record_use(job_id: Uuid, conn: Arc<DatabaseConnection>) {
     let usage = job_use::ActiveModel {
         id: NotSet,
@@ -22,6 +22,7 @@ async fn record_use(job_id: Uuid, conn: Arc<DatabaseConnection>) {
     let _ = usage.insert(conn.as_ref()).await;
 }
 
+#[tracing::instrument(skip(db, stores))]
 async fn resolve_blob(
     id: Uuid,
     db: &DatabaseTransaction,
@@ -44,12 +45,14 @@ async fn resolve_blob(
     });
 }
 
-#[tracing::instrument]
+#[tracing::instrument(skip_all)]
 pub async fn read_job(
     Json(payload): Json<ReadJobPayload>,
     conn: Arc<DatabaseConnection>,
     blob_stores: HashMap<Uuid, Arc<dyn blob::DebugBlobStore + Sync + Send>>,
 ) -> (StatusCode, Json<ReadJobResponse>) {
+    let hash = payload.hash();
+
     // TODO: This transaction is quite large with a bunch of "serialized" queries. If read_job
     // becomes a bottleneck it should be rewritten such that joining on promises is delayed for as
     // long as possible. Another option would be to collect all blob ids ahead of time and make a
@@ -59,13 +62,15 @@ pub async fn read_job(
         .transaction::<_, (Option<Uuid>, ReadJobResponse), DbErr>(|txn| {
             Box::pin(async move {
                 let Some(matching_job) = job::Entity::find()
-                    .filter(job::Column::Hash.eq(payload.hash()))
+                    .filter(job::Column::Hash.eq(hash.clone()))
                     .one(txn)
                     .await?
                 else {
+                    tracing::info!(%hash, "Miss");
                     return Ok((None, ReadJobResponse::NoMatch));
                 };
 
+                tracing::info!(%hash, "Hit");
                 let output_files = matching_job
                     .find_related(output_file::Entity)
                     .all(txn)
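Two details of the read_job diff are easy to miss. `skip(conn)` skips only the connection handle, so `job_id` is still recorded on the record_use span; and the `%` sigil in `tracing::info!(%hash, "Hit")` records the field with its `Display` impl rather than `Debug`. Hoisting `payload.hash()` into a local also lets one hash value serve both the query filter and the Hit/Miss log lines. A compact sketch of these logging idioms, with hypothetical stand-in types:

use tracing::{info, instrument};

#[derive(Debug, Clone)]
struct Hash(String);

impl std::fmt::Display for Hash {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

struct Conn; // stand-in for Arc<DatabaseConnection>

// skip(conn): `job_id` is still captured as a span field; only the
// noisy, non-informative connection handle is omitted.
#[instrument(skip(conn))]
async fn record_use(job_id: u64, conn: Conn) {
    let _ = (job_id, conn);
}

async fn lookup(hash: Hash, found: bool) {
    if found {
        info!(%hash, "Hit"); // `%` = record via Display, not Debug
    } else {
        info!(%hash, "Miss");
    }
}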
