-use std::{
-    cell::RefCell,
-    sync::{atomic::AtomicBool, Arc},
-};
+use std::{cell::RefCell, sync::atomic::AtomicBool};
 
-use gix_features::progress::NestedProgress;
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
 use gix_hash::ObjectId;
 
 use crate::{data::output, find};
@@ -30,16 +26,16 @@ pub type Result<E1, E2> = std::result::Result<(Vec<output::Count>, Outcome), Err
 /// * `objects_ids`
 ///   * A list of objects ids to add to the pack. Duplication checks are performed so no object is ever added to a pack twice.
 ///   * Objects may be expanded based on the provided [`options`][Options]
-/// * `progress`
-///   * a way to obtain progress information
+/// * `objects`
+///   * count the amount of objects we encounter
 /// * `should_interrupt`
 ///   * A flag that is set to true if the operation should stop
 /// * `options`
 ///   * more configuration
 pub fn objects<Find, Iter, IterErr, Oid>(
     db: Find,
     objects_ids: Iter,
-    progress: impl NestedProgress,
+    objects: &dyn gix_features::progress::Count,
     should_interrupt: &AtomicBool,
     Options {
         thread_limit,
@@ -66,52 +62,45 @@ where
         size: chunk_size,
     };
     let seen_objs = gix_hashtable::sync::ObjectIdMap::default();
-    let progress = Arc::new(parking_lot::Mutex::new(progress));
+    let objects = objects.counter();
 
     parallel::in_parallel(
         chunks,
         thread_limit,
         {
-            let progress = Arc::clone(&progress);
-            move |n| {
+            move |_| {
                 (
                     Vec::new(), // object data buffer
                     Vec::new(), // object data buffer 2 to hold two objects at a time
-                    {
-                        let mut p = progress
-                            .lock()
-                            .add_child_with_id(format!("thread {n}"), gix_features::progress::UNKNOWN);
-                        p.init(None, gix_features::progress::count("objects"));
-                        p
-                    },
+                    objects.clone(),
                 )
             }
         },
         {
             let seen_objs = &seen_objs;
-            move |oids: Vec<std::result::Result<Oid, IterErr>>, (buf1, buf2, progress)| {
+            move |oids: Vec<std::result::Result<Oid, IterErr>>, (buf1, buf2, objects)| {
                 expand::this(
                     &db,
                     input_object_expansion,
                     seen_objs,
                     oids,
                     buf1,
                     buf2,
-                    progress,
+                    objects,
                     should_interrupt,
                     true, /*allow pack lookups*/
                 )
             }
         },
-        reduce::Statistics::new(progress),
+        reduce::Statistics::new(),
     )
 }
 
 /// Like [`objects()`] but using a single thread only to mostly save on the otherwise required overhead.
 pub fn objects_unthreaded<Find, IterErr, Oid>(
     db: Find,
     object_ids: impl Iterator<Item = std::result::Result<Oid, IterErr>>,
-    mut progress: impl Progress,
+    objects: impl gix_features::progress::Count,
     should_interrupt: &AtomicBool,
     input_object_expansion: ObjectExpansion,
 ) -> Result<find::existing::Error<Find::Error>, IterErr>
@@ -130,7 +119,7 @@ where
         object_ids,
         &mut buf1,
         &mut buf2,
-        &mut progress,
+        &objects.counter(),
         should_interrupt,
         false, /*allow pack lookups*/
     )
@@ -139,7 +128,6 @@ where
 mod expand {
     use std::sync::atomic::{AtomicBool, Ordering};
 
-    use gix_features::progress::Progress;
     use gix_hash::{oid, ObjectId};
     use gix_object::{CommitRefIter, TagRefIter};
 
@@ -161,7 +149,7 @@ mod expand {
         oids: impl IntoIterator<Item = std::result::Result<Oid, IterErr>>,
         buf1: &mut Vec<u8>,
         #[allow(clippy::ptr_arg)] buf2: &mut Vec<u8>,
-        progress: &mut impl Progress,
+        objects: &gix_features::progress::AtomicStep,
         should_interrupt: &AtomicBool,
         allow_pack_lookups: bool,
     ) -> super::Result<find::existing::Error<Find::Error>, IterErr>
@@ -197,7 +185,7 @@ mod expand {
                     let mut id = id.to_owned();
 
                     loop {
-                        push_obj_count_unique(&mut out, seen_objs, &id, location, progress, stats, false);
+                        push_obj_count_unique(&mut out, seen_objs, &id, location, objects, stats, false);
                         match obj.kind {
                             Tree | Blob => break,
                             Tag => {
@@ -228,12 +216,12 @@ mod expand {
                             }
                             let (obj, location) = db.find(tree_id, buf1)?;
                             push_obj_count_unique(
-                                &mut out, seen_objs, &tree_id, location, progress, stats, true,
+                                &mut out, seen_objs, &tree_id, location, objects, stats, true,
                             );
                             gix_object::TreeRefIter::from_bytes(obj.data)
                         };
 
-                        let objects = if parent_commit_ids.is_empty() {
+                        let objects_ref = if parent_commit_ids.is_empty() {
                             traverse_delegate.clear();
                             gix_traverse::tree::breadthfirst(
                                 current_tree_iter,
@@ -242,7 +230,7 @@ mod expand {
                                     stats.decoded_objects += 1;
                                     match db.find(oid, buf).ok() {
                                         Some((obj, location)) => {
-                                            progress.inc();
+                                            objects.fetch_add(1, Ordering::Relaxed);
                                             stats.expanded_objects += 1;
                                             out.push(output::Count::from_data(oid, location));
                                             obj.try_into_tree_iter()
@@ -260,7 +248,7 @@ mod expand {
                             let (parent_commit_obj, location) = db.find(commit_id, buf2)?;
 
                             push_obj_count_unique(
-                                &mut out, seen_objs, commit_id, location, progress, stats, true,
+                                &mut out, seen_objs, commit_id, location, objects, stats, true,
                             );
                             CommitRefIter::from_bytes(parent_commit_obj.data)
                                 .tree_id()
@@ -273,7 +261,7 @@ mod expand {
                                 seen_objs,
                                 &parent_tree_id,
                                 location,
-                                progress,
+                                objects,
                                 stats,
                                 true,
                             );
@@ -295,8 +283,8 @@ mod expand {
                             }
                             &changes_delegate.objects
                         };
-                        for id in objects.iter() {
-                            out.push(id_to_count(db, buf2, id, progress, stats, allow_pack_lookups));
+                        for id in objects_ref.iter() {
+                            out.push(id_to_count(db, buf2, id, objects, stats, allow_pack_lookups));
                         }
                         break;
                     }
@@ -308,7 +296,7 @@ mod expand {
                     let mut id = id;
                     let mut obj = (obj, location);
                     loop {
-                        push_obj_count_unique(&mut out, seen_objs, &id, obj.1.clone(), progress, stats, false);
+                        push_obj_count_unique(&mut out, seen_objs, &id, obj.1.clone(), objects, stats, false);
                         match obj.0.kind {
                             Tree => {
                                 traverse_delegate.clear();
@@ -319,7 +307,7 @@ mod expand {
                                         stats.decoded_objects += 1;
                                         match db.find(oid, buf).ok() {
                                             Some((obj, location)) => {
-                                                progress.inc();
+                                                objects.fetch_add(1, Ordering::Relaxed);
                                                 stats.expanded_objects += 1;
                                                 out.push(output::Count::from_data(oid, location));
                                                 obj.try_into_tree_iter()
@@ -331,7 +319,7 @@ mod expand {
                                 )
                                 .map_err(Error::TreeTraverse)?;
                                 for id in &traverse_delegate.non_trees {
-                                    out.push(id_to_count(db, buf1, id, progress, stats, allow_pack_lookups));
+                                    out.push(id_to_count(db, buf1, id, objects, stats, allow_pack_lookups));
                                 }
                                 break;
                             }
@@ -355,7 +343,7 @@ mod expand {
                         }
                     }
                 }
-                AsIs => push_obj_count_unique(&mut out, seen_objs, &id, location, progress, stats, false),
+                AsIs => push_obj_count_unique(&mut out, seen_objs, &id, location, objects, stats, false),
             }
         }
         outcome.total_objects = out.len();
@@ -368,13 +356,13 @@ mod expand {
         all_seen: &impl util::InsertImmutable,
         id: &oid,
         location: Option<crate::data::entry::Location>,
-        progress: &mut impl Progress,
+        objects: &gix_features::progress::AtomicStep,
         statistics: &mut Outcome,
         count_expanded: bool,
     ) {
         let inserted = all_seen.insert(id.to_owned());
         if inserted {
-            progress.inc();
+            objects.fetch_add(1, Ordering::Relaxed);
             statistics.decoded_objects += 1;
             if count_expanded {
                 statistics.expanded_objects += 1;
@@ -388,11 +376,11 @@ mod expand {
         db: &Find,
         buf: &mut Vec<u8>,
         id: &oid,
-        progress: &mut impl Progress,
+        objects: &gix_features::progress::AtomicStep,
         statistics: &mut Outcome,
         allow_pack_lookups: bool,
     ) -> output::Count {
-        progress.inc();
+        objects.fetch_add(1, Ordering::Relaxed);
         statistics.expanded_objects += 1;
         output::Count {
             id: id.to_owned(),
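For context, the diff above drops the per-thread progress children (previously created behind a parking_lot::Mutex-guarded NestedProgress) in favor of a shared counter obtained from gix_features::progress::Count::counter(), which each worker clones into its thread state and bumps with a relaxed fetch_add. The snippet below is not part of the commit; it is a minimal, std-only sketch of that counting pattern, with Arc<AtomicUsize> standing in for the shared step counter the trait hands out.

// Not part of the diff: a std-only sketch of the shared-counter pattern,
// assuming the counter behaves like an Arc<AtomicUsize> cloned per worker.
use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};
use std::thread;

fn main() {
    // One shared counter replaces a mutex-guarded progress handle per thread.
    let objects: Arc<AtomicUsize> = Arc::new(AtomicUsize::new(0));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            // Each worker clones the counter into its per-thread state,
            // mirroring `objects.clone()` in the thread-state closure above.
            let objects = Arc::clone(&objects);
            thread::spawn(move || {
                for _ in 0..1000 {
                    // The hot path is a relaxed fetch_add instead of `progress.inc()`.
                    objects.fetch_add(1, Ordering::Relaxed);
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().expect("worker thread panicked");
    }
    assert_eq!(objects.load(Ordering::Relaxed), 4 * 1000);
    println!("counted {} objects", objects.load(Ordering::Relaxed));
}

The design trade-off this illustrates: a relaxed atomic increment avoids lock contention and per-thread progress bookkeeping on the hot path, at the cost of no longer reporting per-thread progress labels.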