@@ -432,6 +432,135 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
432432 return rc ;
433433}
434434
435+ static int smc_write_space (struct smc_connection * conn )
436+ {
437+ int buffer_len = conn -> peer_rmbe_size ;
438+ union smc_host_cursor prod ;
439+ union smc_host_cursor cons ;
440+ int space ;
441+
442+ smc_curs_copy (& prod , & conn -> local_tx_ctrl .prod , conn );
443+ smc_curs_copy (& cons , & conn -> local_rx_ctrl .cons , conn );
444+ /* determine rx_buf space */
445+ space = buffer_len - smc_curs_diff (buffer_len , & cons , & prod );
446+ return space ;
447+ }
448+
/* Re-synchronize a connection's tx cursors after it was moved to another
 * link, so that unacknowledged data is resent over the new link.
 * Called with conn->send_lock held (see smc_switch_conns()).
 * Returns 0 on success, negative error code otherwise.
 */
static int smc_switch_cursor(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons, fin;
	int rc = 0;
	int diff;

	/* roll the sent cursor back to the last confirmed position so
	 * everything after it is transmitted again on the new link
	 */
	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
	/* set prod cursor to old state, enforce tx_rdma_writes() */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
		/* cons cursor advanced more than fin, and prod was set
		 * to fin above, so now prod is smaller than cons. Fix that.
		 */
		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_sent, diff);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_fin, diff);

		/* pair barriers around the atomic so the cursor updates
		 * above are visible before sndbuf_space grows
		 */
		smp_mb__before_atomic();
		atomic_add(diff, &conn->sndbuf_space);
		smp_mb__after_atomic();

		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl.prod, diff);
		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl_fin, diff);
	}
	/* recalculate, value is used by tx_rdma_writes() */
	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));

	if (smc->sk.sk_state != SMC_INIT &&
	    smc->sk.sk_state != SMC_CLOSED) {
		/* tbd: call rc = smc_cdc_get_slot_and_msg_send(conn); */
		if (!rc) {
			/* kick the tx worker and notify waiters;
			 * NOTE(review): sk_data_ready on a tx-side switch
			 * looks intentional (wakes poll waiters) — confirm
			 */
			schedule_delayed_work(&conn->tx_work, 0);
			smc->sk.sk_data_ready(&smc->sk);
		}
	}
	return rc;
}
494+
/* Move all connections of a link group away from a failing link onto an
 * alternate active link of the same group.
 * @lgr:	link group whose connections are switched
 * @from_lnk:	link to abandon
 * @is_dev_err:	true if the whole RoCE device failed, so any other link on
 *		the same device/port must be skipped as a target
 * Returns the link the connections were switched to, or NULL if no usable
 * backup link exists (the link group is then scheduled for termination) or
 * if switching a connection's cursors failed.
 */
struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err)
{
	struct smc_link *to_lnk = NULL;
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;
	int i, rc = 0;

	/* link is inactive, wake up tx waiters */
	smc_wr_wakeup_tx_wait(from_lnk);

	/* pick the first active link that is not the failing one and,
	 * on a device error, not on the same device and port
	 */
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].state != SMC_LNK_ACTIVE ||
		    i == from_lnk->link_idx)
			continue;
		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
		    from_lnk->ibport == lgr->lnk[i].ibport) {
			continue;
		}
		to_lnk = &lgr->lnk[i];
		break;
	}
	if (!to_lnk) {
		/* no backup link — the whole link group must go down */
		smc_lgr_terminate_sched(lgr);
		return NULL;
	}
again:
	read_lock_bh(&lgr->conns_lock);
	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		if (conn->lnk != from_lnk)
			continue;
		smc = container_of(conn, struct smc_sock, conn);
		/* conn->lnk not yet set in SMC_INIT state */
		if (smc->sk.sk_state == SMC_INIT)
			continue;
		if (smc->sk.sk_state == SMC_CLOSED ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
		    smc->sk.sk_state == SMC_PROCESSABORT) {
			/* closing/aborting connection: just repoint the
			 * link, no cursor resync needed
			 */
			spin_lock_bh(&conn->send_lock);
			conn->lnk = to_lnk;
			spin_unlock_bh(&conn->send_lock);
			continue;
		}
		/* active connection: pin the socket, drop conns_lock (the
		 * cursor switch may sleep/schedule work), then restart the
		 * rbtree walk from scratch since it may have changed
		 */
		sock_hold(&smc->sk);
		read_unlock_bh(&lgr->conns_lock);
		/* avoid race with smcr_tx_sndbuf_nonempty() */
		spin_lock_bh(&conn->send_lock);
		conn->lnk = to_lnk;
		rc = smc_switch_cursor(smc);
		spin_unlock_bh(&conn->send_lock);
		sock_put(&smc->sk);
		if (rc) {
			/* switching failed — take the target link down too */
			smcr_link_down_cond_sched(to_lnk);
			return NULL;
		}
		goto again;
	}
	read_unlock_bh(&lgr->conns_lock);
	return to_lnk;
}
563+
435564static void smcr_buf_unuse (struct smc_buf_desc * rmb_desc ,
436565 struct smc_link_group * lgr )
437566{
@@ -943,8 +1072,7 @@ static void smcr_link_down(struct smc_link *lnk)
9431072 return ;
9441073
9451074 smc_ib_modify_qp_reset (lnk );
946- to_lnk = NULL ;
947- /* tbd: call to_lnk = smc_switch_conns(lgr, lnk, true); */
1075+ to_lnk = smc_switch_conns (lgr , lnk , true);
9481076 if (!to_lnk ) { /* no backup link available */
9491077 smcr_link_clear (lnk );
9501078 return ;
0 commit comments