@@ -88,6 +88,8 @@ static void netfs_free_read_request(struct work_struct *work)
 	if (rreq->netfs_priv)
 		rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
 	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+	if (rreq->cache_resources.ops)
+		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
 	kfree(rreq);
 	netfs_stat_d(&netfs_n_rh_rreq);
 }
@@ -154,6 +156,34 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
 	iov_iter_zero(iov_iter_count(&iter), &iter);
 }
 
+static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
+					bool was_async)
+{
+	struct netfs_read_subrequest *subreq = priv;
+
+	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
+}
+
+/*
+ * Issue a read against the cache.
+ * - Eats the caller's ref on subreq.
+ */
+static void netfs_read_from_cache(struct netfs_read_request *rreq,
+				  struct netfs_read_subrequest *subreq,
+				  bool seek_data)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	struct iov_iter iter;
+
+	netfs_stat(&netfs_n_rh_read);
+	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
+			subreq->start + subreq->transferred,
+			subreq->len - subreq->transferred);
+
+	cres->ops->read(cres, subreq->start, &iter, seek_data,
+			netfs_cache_read_terminated, subreq);
+}
+
 /*
  * Fill a subrequest region with zeroes.
  */
@@ -198,6 +228,141 @@ static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async
 	netfs_put_read_request(rreq, was_async);
 }
 
+/*
+ * Deal with the completion of writing the data to the cache.  We have to clear
+ * the PG_fscache bits on the pages involved and release the caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
+					  bool was_async)
+{
+	struct netfs_read_subrequest *subreq;
+	struct page *page;
+	pgoff_t unlocked = 0;
+	bool have_unlocked = false;
+
+	rcu_read_lock();
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+		xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+			/* We might have multiple writes from the same huge
+			 * page, but we mustn't unlock a page more than once.
+			 */
+			if (have_unlocked && page->index <= unlocked)
+				continue;
+			unlocked = page->index;
+			end_page_fscache(page);
+			have_unlocked = true;
+		}
+	}
+
+	rcu_read_unlock();
+	netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+				       bool was_async)
+{
+	struct netfs_read_subrequest *subreq = priv;
+	struct netfs_read_request *rreq = subreq->rreq;
+
+	if (IS_ERR_VALUE(transferred_or_error)) {
+		netfs_stat(&netfs_n_rh_write_failed);
+	} else {
+		netfs_stat(&netfs_n_rh_write_done);
+	}
+
+	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+	/* If we decrement nr_wr_ops to 0, the ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_wr_ops))
+		netfs_rreq_unmark_after_write(rreq, was_async);
+
+	netfs_put_subrequest(subreq, was_async);
+}
+
+/*
+ * Perform any outstanding writes to the cache.  We inherit a ref from the
+ * caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+	struct netfs_read_subrequest *subreq, *next, *p;
+	struct iov_iter iter;
+	int ret;
+
+	trace_netfs_rreq(rreq, netfs_rreq_trace_write);
+
+	/* We don't want terminating writes trying to wake us up whilst we're
+	 * still going through the list.
+	 */
+	atomic_inc(&rreq->nr_wr_ops);
+
+	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+		if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
+			list_del_init(&subreq->rreq_link);
+			netfs_put_subrequest(subreq, false);
+		}
+	}
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		/* Amalgamate adjacent writes */
+		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+			next = list_next_entry(subreq, rreq_link);
+			if (next->start != subreq->start + subreq->len)
+				break;
+			subreq->len += next->len;
+			list_del_init(&next->rreq_link);
+			netfs_put_subrequest(next, false);
+		}
+
+		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+					       rreq->i_size);
+		if (ret < 0) {
+			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+			continue;
+		}
+
+		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
+				subreq->start, subreq->len);
+
+		atomic_inc(&rreq->nr_wr_ops);
+		netfs_stat(&netfs_n_rh_write);
+		netfs_get_read_subrequest(subreq);
+		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+		cres->ops->write(cres, subreq->start, &iter,
+				 netfs_rreq_copy_terminated, subreq);
+	}
+
+	/* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
+	if (atomic_dec_and_test(&rreq->nr_wr_ops))
+		netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+{
+	struct netfs_read_request *rreq =
+		container_of(work, struct netfs_read_request, work);
+
+	netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
+				      bool was_async)
+{
+	if (was_async) {
+		rreq->work.func = netfs_rreq_write_to_cache_work;
+		if (!queue_work(system_unbound_wq, &rreq->work))
+			BUG();
+	} else {
+		netfs_rreq_do_write_to_cache(rreq);
+	}
+}
+
 /*
  * Unlock the pages in a read operation.  We need to set PG_fscache on any
  * pages we're going to write back before we unlock them.
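
A note on the nr_wr_ops accounting in netfs_rreq_do_write_to_cache() above: the function holds an extra "bias" count on rreq->nr_wr_ops while it walks the subrequest list, so a write that completes immediately cannot drop the count to zero and run netfs_rreq_unmark_after_write() underneath it; whichever path drops the count to zero owns the completion step. The fragment below is only a minimal userspace analogy of that pattern in plain C11 atomics, not code from this patch.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int nr_ops = 0;

    /* Drop one count; the caller that hits zero owns the completion step,
     * mirroring atomic_dec_and_test(&rreq->nr_wr_ops) in the patch.
     */
    static void put_op(void)
    {
            if (atomic_fetch_sub(&nr_ops, 1) == 1)
                    puts("all writes done: run the completion step");
    }

    int main(void)
    {
            atomic_fetch_add(&nr_ops, 1);           /* bias count held while dispatching */

            for (int i = 0; i < 3; i++) {
                    atomic_fetch_add(&nr_ops, 1);   /* one count per write in flight */
                    put_op();                       /* the write's termination callback */
            }

            put_op();                               /* drop the bias; may run completion */
            return 0;
    }
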
@@ -299,7 +464,10 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq,
 
 	netfs_get_read_subrequest(subreq);
 	atomic_inc(&rreq->nr_rd_ops);
-	netfs_read_from_server(rreq, subreq);
+	if (subreq->source == NETFS_READ_FROM_CACHE)
+		netfs_read_from_cache(rreq, subreq, true);
+	else
+		netfs_read_from_server(rreq, subreq);
 }
 
 /*
@@ -344,6 +512,25 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
 	return false;
 }
 
+/*
+ * Check to see if the data read is still valid.
+ */
+static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
+{
+	struct netfs_read_subrequest *subreq;
+
+	if (!rreq->netfs_ops->is_still_valid ||
+	    rreq->netfs_ops->is_still_valid(rreq))
+		return;
+
+	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+		if (subreq->source == NETFS_READ_FROM_CACHE) {
+			subreq->error = -ESTALE;
+			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+		}
+	}
+}
+
 /*
  * Assess the state of a read request and decide what to do next.
  *
@@ -355,6 +542,8 @@ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
 	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
 
 again:
+	netfs_rreq_is_still_valid(rreq);
+
 	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
 	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
 		if (netfs_rreq_perform_resubmissions(rreq))
@@ -367,6 +556,9 @@ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
+	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
+		return netfs_rreq_write_to_cache(rreq, was_async);
+
 	netfs_rreq_completed(rreq, was_async);
 }
 
@@ -504,7 +696,10 @@ static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequ
 						     loff_t i_size)
 {
 	struct netfs_read_request *rreq = subreq->rreq;
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
 
+	if (cres->ops)
+		return cres->ops->prepare_read(subreq, i_size);
 	if (subreq->start >= rreq->i_size)
 		return NETFS_FILL_WITH_ZEROES;
 	return NETFS_DOWNLOAD_FROM_SERVER;
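
When a cache is attached, the decision above is delegated to the backend's ->prepare_read(). The fragment below is only an illustration of the kind of choice such a hook might make, built from the enum values and the NETFS_SREQ_WRITE_TO_CACHE flag used elsewhere in this patch; cachefiles_has_data() is a made-up placeholder for however the backend checks whether the range is already cached, not an API defined by this series.

    /* Hypothetical cache-backend hook: pick the source for one subrequest. */
    static enum netfs_read_source example_prepare_read(struct netfs_read_subrequest *subreq,
                                                       loff_t i_size)
    {
            if (subreq->start >= i_size)
                    return NETFS_FILL_WITH_ZEROES;

            if (cachefiles_has_data(subreq->rreq, subreq->start, subreq->len))
                    return NETFS_READ_FROM_CACHE;

            /* Not cached: fetch from the server and copy it back afterwards. */
            __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
            return NETFS_DOWNLOAD_FROM_SERVER;
    }
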
@@ -595,6 +790,9 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
 	case NETFS_DOWNLOAD_FROM_SERVER:
 		netfs_read_from_server(rreq, subreq);
 		break;
+	case NETFS_READ_FROM_CACHE:
+		netfs_read_from_cache(rreq, subreq, false);
+		break;
 	default:
 		BUG();
 	}
@@ -607,9 +805,23 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
 	return false;
 }
 
+static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
+					 loff_t *_start, size_t *_len, loff_t i_size)
+{
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+	if (cres->ops && cres->ops->expand_readahead)
+		cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
+
 static void netfs_rreq_expand(struct netfs_read_request *rreq,
 			      struct readahead_control *ractl)
 {
+	/* Give the cache a chance to change the request parameters.  The
+	 * resultant request must contain the original region.
+	 */
+	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
+
 	/* Give the netfs a chance to change the request parameters.  The
 	 * resultant request must contain the original region.
 	 */
@@ -661,6 +873,7 @@ void netfs_readahead(struct readahead_control *ractl,
 	struct netfs_read_request *rreq;
 	struct page *page;
 	unsigned int debug_index = 0;
+	int ret;
 
 	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
 
@@ -674,6 +887,12 @@ void netfs_readahead(struct readahead_control *ractl,
 	rreq->start = readahead_pos(ractl);
 	rreq->len = readahead_length(ractl);
 
+	if (ops->begin_cache_operation) {
+		ret = ops->begin_cache_operation(rreq);
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+			goto cleanup_free;
+	}
+
 	netfs_stat(&netfs_n_rh_readahead);
 	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
 			 netfs_read_trace_readahead);
@@ -698,6 +917,9 @@ void netfs_readahead(struct readahead_control *ractl,
 	netfs_rreq_assess(rreq, false);
 	return;
 
+cleanup_free:
+	netfs_put_read_request(rreq, false);
+	return;
 cleanup:
 	if (netfs_priv)
 		ops->cleanup(ractl->mapping, netfs_priv);
@@ -744,6 +966,14 @@ int netfs_readpage(struct file *file,
 	rreq->start = page_index(page) * PAGE_SIZE;
 	rreq->len = thp_size(page);
 
+	if (ops->begin_cache_operation) {
+		ret = ops->begin_cache_operation(rreq);
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
+			unlock_page(page);
+			goto out;
+		}
+	}
+
 	netfs_stat(&netfs_n_rh_readpage);
 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
 
@@ -768,6 +998,7 @@ int netfs_readpage(struct file *file,
 	ret = rreq->error;
 	if (ret == 0 && rreq->submitted < rreq->len)
 		ret = -EIO;
+out:
 	netfs_put_read_request(rreq, false);
 	return ret;
 }
@@ -873,6 +1104,12 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	__set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
 	netfs_priv = NULL;
 
+	if (ops->begin_cache_operation) {
+		ret = ops->begin_cache_operation(rreq);
+		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+			goto error_put;
+	}
+
 	netfs_stat(&netfs_n_rh_write_begin);
 	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
 
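
All three entry points (netfs_readahead(), netfs_readpage() and netfs_write_begin()) now offer the filesystem a ->begin_cache_operation() hook before any I/O is issued; only -ENOMEM, -EINTR and -ERESTARTSYS abort the request, while other errors simply leave rreq->cache_resources unset so the read falls through to the server. As a rough sketch of the filesystem side, a netfs might wire the hook up as below; the fscache_begin_read_operation() helper, the example_inode_cookie() lookup and the use of rreq->inode are assumptions about the accompanying fscache API, not something defined in this file.

    /* Hypothetical netfs-side hook: attach the inode's cache cookie to
     * rreq->cache_resources so the cache ops used above become available.
     */
    static int example_begin_cache_operation(struct netfs_read_request *rreq)
    {
            struct fscache_cookie *cookie = example_inode_cookie(rreq->inode);

            if (!cookie)
                    return -ENOBUFS;        /* no cache attached: read from the server */

            return fscache_begin_read_operation(rreq, cookie);
    }
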