@@ -102,6 +102,39 @@ mod blocking_and_async_io {
             )?;
             Ok(())
         }
+        fn check_fetch_output(
+            repo: &gix::Repository,
+            out: gix::remote::fetch::Outcome,
+            expected_count: usize,
+        ) -> gix_testtools::Result {
+            for local_tracking_branch_name in out.ref_map.mappings.into_iter().filter_map(|m| m.local) {
+                let r = repo.find_reference(&local_tracking_branch_name)?;
+                r.id()
+                    .object()
+                    .expect("object should be present after fetching, triggering pack refreshes works");
+                repo.head_ref()?.unwrap().set_target_id(r.id(), "post fetch")?;
+            }
+            check_odb_accessability(repo, expected_count)?;
+            Ok(())
+        }
+        fn check_odb_accessability(repo: &gix::Repository, expected_count: usize) -> gix_testtools::Result {
+            let mut count_unique = 0;
+            // TODO: somehow there is a lot of duplication when receiving objects.
+            let mut seen = gix_hashtable::HashSet::default();
+            for id in repo.objects.iter()? {
+                let id = id?;
+                if !seen.insert(id) {
+                    continue;
+                }
+                let _obj = repo.find_object(id)?;
+                count_unique += 1;
+            }
+            assert_eq!(
+                count_unique, expected_count,
+                "Each round we receive exactly one commit, effectively"
+            );
+            Ok(())
+        }
         for max_packs in 1..=3 {
             let remote_dir = tempfile::tempdir()?;
             let mut remote_repo = gix::init_bare(remote_dir.path())?;
@@ -128,25 +161,31 @@ mod blocking_and_async_io {
                     Fetch,
                 )
                 .expect("remote is configured after clone")?;
-            for _round_to_create_pack in 1..12 {
+            let Slots::AsNeededByDiskState {
+                multiplier: _,
+                minimum: minimum_slots,
+            } = Slots::default()
+            else {
+                unreachable!("The default for slotmappings is dynamic");
+            };
+            let one_more_than_minimum = minimum_slots + 1;
+            for round_to_create_pack in 1..one_more_than_minimum {
+                let expected_object_count = round_to_create_pack + 1 + 1 /* first commit + tree */;
                 create_empty_commit(&remote_repo)?;
                 match remote
                     .connect(Fetch)?
                     .prepare_fetch(gix::progress::Discard, Default::default())?
                     .receive(gix::progress::Discard, &IS_INTERRUPTED)
                 {
-                    Ok(out) => {
-                        for local_tracking_branch_name in out.ref_map.mappings.into_iter().filter_map(|m| m.local) {
-                            let r = local_repo.find_reference(&local_tracking_branch_name)?;
-                            r.id()
-                                .object()
-                                .expect("object should be present after fetching, triggering pack refreshes works");
-                            local_repo.head_ref()?.unwrap().set_target_id(r.id(), "post fetch")?;
-                        }
+                    Ok(out) => check_fetch_output(&local_repo, out, expected_object_count)?,
+                    Err(err) => {
+                        assert!(err
+                            .to_string()
+                            .starts_with("The slotmap turned out to be too small with "));
+                        // But opening a new repo will always be able to read all objects.
+                        let local_repo = gix::open_opts(local_repo.path(), gix::open::Options::isolated())?;
+                        check_odb_accessability(&local_repo, expected_object_count)?;
                     }
-                    Err(err) => assert!(err
-                        .to_string()
-                        .starts_with("The slotmap turned out to be too small with ")),
                 }
             }
         }
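
For context on the `Slots` value destructured above: the slotmap is the object database's fixed-size table of memory-mapped pack and index files, and its size is decided when the repository is opened. Below is a minimal sketch of how a caller could pin it to a fixed size instead of relying on the dynamic default, assuming `Slots` is `gix::odb::store::init::Slots` and that `gix::open::Options::object_store_slots` is available; neither import appears in this diff, so treat the exact paths as an assumption rather than part of the change.

```rust
use gix::odb::store::init::Slots;

// Sketch: open a repository whose ODB slotmap has exactly four entries.
// Once more than four packs/indices exist on disk, refreshing the store can
// fail with "The slotmap turned out to be too small with ...", which is the
// situation the test above provokes by fetching one pack per round.
fn open_with_fixed_slots(path: &std::path::Path) -> Result<gix::Repository, gix::open::Error> {
    gix::open_opts(
        path,
        gix::open::Options::isolated().object_store_slots(Slots::Given(4)),
    )
}
```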