@@ -1,17 +1,20 @@
-use std::io::{Read, Write};
-use std::path::{Path, PathBuf};
-
-use rayon::prelude::*;
-use rayon::slice::ParallelSlice;
-
-use super::common::{collect_owners, collect_tags, get_repo_hash};
-use super::owner_resolver::find_owners_for_file;
-use super::parse::parse_repo;
-use super::tag_resolver::find_tags_for_file;
-use super::types::{CacheEncoding, CodeownersCache, CodeownersEntry, FileEntry};
-use crate::core::resolver::find_owners_and_tags_for_file;
-use crate::core::types::{CodeownersEntryMatcher, codeowners_entry_to_matcher};
-use crate::utils::error::{Error, Result};
+use crate::{
+    core::{
+        common::{collect_owners, collect_tags, get_repo_hash},
+        parse::parse_repo,
+        resolver::find_owners_and_tags_for_file,
+        types::{
+            CacheEncoding, CodeownersCache, CodeownersEntry, CodeownersEntryMatcher, FileEntry,
+            codeowners_entry_to_matcher,
+        },
+    },
+    utils::error::{Error, Result},
+};
+use rayon::{iter::ParallelIterator, slice::ParallelSlice};
+use std::{
+    io::{Read, Write},
+    path::{Path, PathBuf},
+};
 
 /// Create a cache from parsed CODEOWNERS entries and files
 pub fn build_cache(
@@ -20,21 +23,37 @@ pub fn build_cache(
     let mut owners_map = std::collections::HashMap::new();
     let mut tags_map = std::collections::HashMap::new();
 
-    println!("start building cache");
-
     let matched_entries: Vec<CodeownersEntryMatcher> = entries
         .iter()
         .map(|entry| codeowners_entry_to_matcher(entry))
         .collect();
 
     // Process each file to find owners and tags
+    let total_files = files.len();
+    let processed_count = std::sync::atomic::AtomicUsize::new(0);
+
     let file_entries: Vec<FileEntry> = files
         .par_chunks(100)
         .flat_map(|chunk| {
             chunk
                 .iter()
                 .map(|file_path| {
-                    println!("Processing file: {}", file_path.display());
+                    let current =
+                        processed_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed) + 1;
+
+                    // Limit filename display length and clear the line properly
+                    let file_display = file_path.display().to_string();
+                    let truncated_file = if file_display.len() > 60 {
+                        format!("...{}", &file_display[file_display.len() - 57..])
+                    } else {
+                        file_display
+                    };
+
+                    print!(
+                        "\r\x1b[K📁 Processing [{}/{}] {}",
+                        current, total_files, truncated_file
+                    );
+                    std::io::stdout().flush().unwrap();
 
                     let (owners, tags) =
                         find_owners_and_tags_for_file(file_path, &matched_entries).unwrap();
@@ -50,7 +69,8 @@ pub fn build_cache(
         })
         .collect();
 
-    println!("file entry done");
+    // Print newline after processing is complete
+    println!("\r\x1b[K✅ Processed {} files successfully", total_files);
 
     // Process each owner
     let owners = collect_owners(&entries);
@@ -63,8 +83,6 @@ pub fn build_cache(
         }
     });
 
-    println!("owner done");
-
     // Process each tag
     let tags = collect_tags(&entries);
     tags.iter().for_each(|tag| {
@@ -76,8 +94,6 @@ pub fn build_cache(
         }
     });
 
-    println!("tag done");
-
     Ok(CodeownersCache {
         hash,
         entries,
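
For readers unfamiliar with the terminal escape sequences used here, below is a minimal, self-contained sketch of the single-line progress pattern this diff introduces. The `report_progress` helper and the sample file names are hypothetical and not part of the crate; the escape codes and atomic counter mirror what the diff adds.

```rust
use std::io::Write;
use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical helper mirroring the pattern above: a shared atomic counter
// (safe to bump from rayon worker threads), `\r` to return to the start of
// the current line, and `\x1b[K` to erase whatever was printed there before.
fn report_progress(counter: &AtomicUsize, total: usize, label: &str) {
    let current = counter.fetch_add(1, Ordering::Relaxed) + 1;
    print!("\r\x1b[K📁 Processing [{}/{}] {}", current, total, label);
    // `print!` does not flush on its own, so flush explicitly to make the
    // progress line appear immediately.
    std::io::stdout().flush().unwrap();
}

fn main() {
    let counter = AtomicUsize::new(0);
    let files = ["src/lib.rs", "src/cache.rs", "CODEOWNERS"];
    for file in &files {
        report_progress(&counter, files.len(), file);
    }
    // Clear the last progress line and print the final summary with a newline.
    println!("\r\x1b[K✅ Processed {} files successfully", files.len());
}
```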