diff --git a/src-tauri/src/commands/poller.rs b/src-tauri/src/commands/poller.rs
index 1ef2e29..39ee86d 100644
--- a/src-tauri/src/commands/poller.rs
+++ b/src-tauri/src/commands/poller.rs
@@ -184,7 +184,7 @@ pub fn get_queue_status(
         .lock()
         .map_err(|e| AppError::from(format!("Database lock failed: {}", e)))?;
 
-    let tickets = ProcessedTicket::list_by_project(&db, &project_id)?;
+    let tickets = ProcessedTicket::list_by_project_summary(&db, &project_id)?;
 
     Ok(tickets)
 }
@@ -246,7 +246,7 @@ pub fn get_runtime_activity(
         .db
         .lock()
         .map_err(|e| AppError::from(format!("Database lock failed: {}", e)))?;
-    let project_tickets = ProcessedTicket::list_by_project(&db, &project_id)?;
+    let project_tickets = ProcessedTicket::list_by_project_summary(&db, &project_id)?;
 
     let mut agents_by_ticket_id: HashMap = HashMap::new();
     for ticket in &project_tickets {
diff --git a/src-tauri/src/commands/tracker.rs b/src-tauri/src/commands/tracker.rs
index 0917d06..6749bdc 100644
--- a/src-tauri/src/commands/tracker.rs
+++ b/src-tauri/src/commands/tracker.rs
@@ -165,6 +165,6 @@ pub fn list_processed_tickets(
         .lock()
         .map_err(|e| AppError::from(format!("Database lock failed: {}", e)))?;
 
-    let tickets = ProcessedTicket::list_by_project(&db, &project_id)?;
+    let tickets = ProcessedTicket::list_by_project_summary(&db, &project_id)?;
     Ok(tickets)
 }
diff --git a/src-tauri/src/models/ticket.rs b/src-tauri/src/models/ticket.rs
index 1d8994b..d17b980 100644
--- a/src-tauri/src/models/ticket.rs
+++ b/src-tauri/src/models/ticket.rs
@@ -52,6 +52,9 @@ fn from_row(row: &rusqlite::Row) -> rusqlite::Result {
 const SELECT_ALL_COLS: &str = "SELECT id, tracker_id, project_id, source, source_ref, \
     artifact_id, artifact_title, artifact_data, status, analyst_report, developer_report, \
     worktree_path, branch_name, detected_at, processed_at FROM processed_tickets";
+const SELECT_SUMMARY_COLS: &str = "SELECT id, tracker_id, project_id, source, source_ref, \
+    artifact_id, artifact_title, '' AS artifact_data, status, analyst_report, developer_report, \
+    worktree_path, branch_name, detected_at, processed_at FROM processed_tickets";
 
 impl ProcessedTicket {
     /// Atomically insert a new ticket keyed by (tracker_id, artifact_id).
@@ -168,6 +171,7 @@ impl ProcessedTicket {
         rows.collect()
     }
 
+    #[allow(dead_code)]
     pub fn list_by_project(conn: &Connection, project_id: &str) -> Result<Vec<Self>> {
         let sql = format!(
             "{} WHERE project_id = ?1 ORDER BY detected_at DESC",
@@ -178,6 +182,19 @@ impl ProcessedTicket {
         rows.collect()
     }
 
+    pub fn list_by_project_summary(
+        conn: &Connection,
+        project_id: &str,
+    ) -> Result<Vec<Self>> {
+        let sql = format!(
+            "{} WHERE project_id = ?1 ORDER BY detected_at DESC",
+            SELECT_SUMMARY_COLS
+        );
+        let mut stmt = conn.prepare(&sql)?;
+        let rows = stmt.query_map(params![project_id], from_row)?;
+        rows.collect()
+    }
+
     pub fn get_project_throughput_stats(
         conn: &Connection,
         project_id: &str,
@@ -801,4 +818,25 @@ mod tests {
         assert!(tickets.iter().any(|ticket| ticket.source == "graylog"));
         assert!(tickets.iter().any(|ticket| ticket.source == "tuleap"));
     }
+
+    #[test]
+    fn test_list_by_project_summary_omits_artifact_payload() {
+        let (conn, project_id, tracker_id) = setup();
+
+        ProcessedTicket::insert_if_new(
+            &conn,
+            &project_id,
+            &tracker_id,
+            999,
+            "Large payload ticket",
+            &"x".repeat(10_000),
+        )
+        .expect("insert should succeed");
+
+        let tickets = ProcessedTicket::list_by_project_summary(&conn, &project_id)
+            .expect("summary list should succeed");
+
+        assert!(!tickets.is_empty());
+        assert!(tickets.iter().all(|ticket| ticket.artifact_data.is_empty()));
+    }
 }