@@ -93,6 +93,8 @@ struct pcpu_secy_stats {
93
93
* @secys: linked list of SecY's on the underlying device
94
94
* @gro_cells: pointer to the Generic Receive Offload cell
95
95
* @offload: status of offloading on the MACsec device
96
+ * @insert_tx_tag: when offloading, device requires to insert an
97
+ * additional tag
96
98
*/
97
99
struct macsec_dev {
98
100
struct macsec_secy secy ;
@@ -102,6 +104,7 @@ struct macsec_dev {
102
104
struct list_head secys ;
103
105
struct gro_cells gro_cells ;
104
106
enum macsec_offload offload ;
107
+ bool insert_tx_tag ;
105
108
};
106
109
107
110
/**
@@ -604,26 +607,11 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
604
607
return ERR_PTR (- EINVAL );
605
608
}
606
609
607
- if (unlikely (skb_headroom (skb ) < MACSEC_NEEDED_HEADROOM ||
608
- skb_tailroom (skb ) < MACSEC_NEEDED_TAILROOM )) {
609
- struct sk_buff * nskb = skb_copy_expand (skb ,
610
- MACSEC_NEEDED_HEADROOM ,
611
- MACSEC_NEEDED_TAILROOM ,
612
- GFP_ATOMIC );
613
- if (likely (nskb )) {
614
- consume_skb (skb );
615
- skb = nskb ;
616
- } else {
617
- macsec_txsa_put (tx_sa );
618
- kfree_skb (skb );
619
- return ERR_PTR (- ENOMEM );
620
- }
621
- } else {
622
- skb = skb_unshare (skb , GFP_ATOMIC );
623
- if (!skb ) {
624
- macsec_txsa_put (tx_sa );
625
- return ERR_PTR (- ENOMEM );
626
- }
610
+ ret = skb_ensure_writable_head_tail (skb , dev );
611
+ if (unlikely (ret < 0 )) {
612
+ macsec_txsa_put (tx_sa );
613
+ kfree_skb (skb );
614
+ return ERR_PTR (ret );
627
615
}
628
616
629
617
unprotected_len = skb -> len ;
@@ -2583,6 +2571,33 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
2583
2571
return false;
2584
2572
}
2585
2573
2574
+ static bool macsec_needs_tx_tag (struct macsec_dev * macsec ,
2575
+ const struct macsec_ops * ops )
2576
+ {
2577
+ return macsec -> offload == MACSEC_OFFLOAD_PHY &&
2578
+ ops -> mdo_insert_tx_tag ;
2579
+ }
2580
+
2581
+ static void macsec_set_head_tail_room (struct net_device * dev )
2582
+ {
2583
+ struct macsec_dev * macsec = macsec_priv (dev );
2584
+ struct net_device * real_dev = macsec -> real_dev ;
2585
+ int needed_headroom , needed_tailroom ;
2586
+ const struct macsec_ops * ops ;
2587
+
2588
+ ops = macsec_get_ops (macsec , NULL );
2589
+ if (ops ) {
2590
+ needed_headroom = ops -> needed_headroom ;
2591
+ needed_tailroom = ops -> needed_tailroom ;
2592
+ } else {
2593
+ needed_headroom = MACSEC_NEEDED_HEADROOM ;
2594
+ needed_tailroom = MACSEC_NEEDED_TAILROOM ;
2595
+ }
2596
+
2597
+ dev -> needed_headroom = real_dev -> needed_headroom + needed_headroom ;
2598
+ dev -> needed_tailroom = real_dev -> needed_tailroom + needed_tailroom ;
2599
+ }
2600
+
2586
2601
static int macsec_update_offload (struct net_device * dev , enum macsec_offload offload )
2587
2602
{
2588
2603
enum macsec_offload prev_offload ;
@@ -2620,8 +2635,13 @@ static int macsec_update_offload(struct net_device *dev, enum macsec_offload off
2620
2635
ctx .secy = & macsec -> secy ;
2621
2636
ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload (ops -> mdo_del_secy , & ctx )
2622
2637
: macsec_offload (ops -> mdo_add_secy , & ctx );
2623
- if (ret )
2638
+ if (ret ) {
2624
2639
macsec -> offload = prev_offload ;
2640
+ return ret ;
2641
+ }
2642
+
2643
+ macsec_set_head_tail_room (dev );
2644
+ macsec -> insert_tx_tag = macsec_needs_tx_tag (macsec , ops );
2625
2645
2626
2646
return ret ;
2627
2647
}
@@ -3379,6 +3399,40 @@ static struct genl_family macsec_fam __ro_after_init = {
3379
3399
.resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1 ,
3380
3400
};
3381
3401
3402
/* Ask the offloading PHY driver to prepend its device-specific TX tag.
 *
 * Only reached from the transmit path when macsec->insert_tx_tag is set,
 * which implies a PHY offload driver providing mdo_insert_tx_tag.
 * NOTE(review): ops and real_dev->phydev are used without a NULL check —
 * presumably guaranteed by the insert_tx_tag precondition; confirm against
 * the caller.
 *
 * On success, returns the (possibly reallocated) skb ready for
 * transmission. On failure, consumes @skb and returns an ERR_PTR.
 */
static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	const struct macsec_ops *ops;
	struct phy_device *phydev;
	struct macsec_context ctx;
	int skb_final_len;
	int err;

	ops = macsec_get_ops(macsec, &ctx);
	/* Frame length after the driver adds its tag and trailer must
	 * still fit within the lower device's MTU (Ethernet header is
	 * not counted against the MTU, hence the ETH_HLEN subtraction).
	 */
	skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
		ops->needed_tailroom;
	if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
		err = -EINVAL;
		goto cleanup;
	}

	phydev = macsec->real_dev->phydev;

	/* Guarantee writable head/tail room before the PHY driver
	 * modifies the buffer.
	 */
	err = skb_ensure_writable_head_tail(skb, dev);
	if (unlikely(err < 0))
		goto cleanup;

	err = ops->mdo_insert_tx_tag(phydev, skb);
	if (unlikely(err))
		goto cleanup;

	return skb;
cleanup:
	kfree_skb(skb);
	return ERR_PTR(err);
}
3435
+
3382
3436
static netdev_tx_t macsec_start_xmit (struct sk_buff * skb ,
3383
3437
struct net_device * dev )
3384
3438
{
@@ -3393,6 +3447,15 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3393
3447
skb_dst_drop (skb );
3394
3448
dst_hold (& md_dst -> dst );
3395
3449
skb_dst_set (skb , & md_dst -> dst );
3450
+
3451
+ if (macsec -> insert_tx_tag ) {
3452
+ skb = macsec_insert_tx_tag (skb , dev );
3453
+ if (IS_ERR (skb )) {
3454
+ DEV_STATS_INC (dev , tx_dropped );
3455
+ return NETDEV_TX_OK ;
3456
+ }
3457
+ }
3458
+
3396
3459
skb -> dev = macsec -> real_dev ;
3397
3460
return dev_queue_xmit (skb );
3398
3461
}
@@ -3454,10 +3517,7 @@ static int macsec_dev_init(struct net_device *dev)
3454
3517
dev -> features = real_dev -> features & MACSEC_FEATURES ;
3455
3518
dev -> features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE ;
3456
3519
3457
- dev -> needed_headroom = real_dev -> needed_headroom +
3458
- MACSEC_NEEDED_HEADROOM ;
3459
- dev -> needed_tailroom = real_dev -> needed_tailroom +
3460
- MACSEC_NEEDED_TAILROOM ;
3520
+ macsec_set_head_tail_room (dev );
3461
3521
3462
3522
if (is_zero_ether_addr (dev -> dev_addr ))
3463
3523
eth_hw_addr_inherit (dev , real_dev );
@@ -3604,21 +3664,19 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
3604
3664
struct macsec_dev * macsec = macsec_priv (dev );
3605
3665
struct net_device * real_dev = macsec -> real_dev ;
3606
3666
struct sockaddr * addr = p ;
3667
+ u8 old_addr [ETH_ALEN ];
3607
3668
int err ;
3608
3669
3609
3670
if (!is_valid_ether_addr (addr -> sa_data ))
3610
3671
return - EADDRNOTAVAIL ;
3611
3672
3612
- if (!(dev -> flags & IFF_UP ))
3613
- goto out ;
3614
-
3615
- err = dev_uc_add (real_dev , addr -> sa_data );
3616
- if (err < 0 )
3617
- return err ;
3618
-
3619
- dev_uc_del (real_dev , dev -> dev_addr );
3673
+ if (dev -> flags & IFF_UP ) {
3674
+ err = dev_uc_add (real_dev , addr -> sa_data );
3675
+ if (err < 0 )
3676
+ return err ;
3677
+ }
3620
3678
3621
- out :
3679
+ ether_addr_copy ( old_addr , dev -> dev_addr );
3622
3680
eth_hw_addr_set (dev , addr -> sa_data );
3623
3681
3624
3682
/* If h/w offloading is available, propagate to the device */
@@ -3627,13 +3685,29 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
3627
3685
struct macsec_context ctx ;
3628
3686
3629
3687
ops = macsec_get_ops (macsec , & ctx );
3630
- if (ops ) {
3631
- ctx . secy = & macsec -> secy ;
3632
- macsec_offload ( ops -> mdo_upd_secy , & ctx ) ;
3688
+ if (! ops ) {
3689
+ err = - EOPNOTSUPP ;
3690
+ goto restore_old_addr ;
3633
3691
}
3692
+
3693
+ ctx .secy = & macsec -> secy ;
3694
+ err = macsec_offload (ops -> mdo_upd_secy , & ctx );
3695
+ if (err )
3696
+ goto restore_old_addr ;
3634
3697
}
3635
3698
3699
+ if (dev -> flags & IFF_UP )
3700
+ dev_uc_del (real_dev , old_addr );
3701
+
3636
3702
return 0 ;
3703
+
3704
+ restore_old_addr :
3705
+ if (dev -> flags & IFF_UP )
3706
+ dev_uc_del (real_dev , addr -> sa_data );
3707
+
3708
+ eth_hw_addr_set (dev , old_addr );
3709
+
3710
+ return err ;
3637
3711
}
3638
3712
3639
3713
static int macsec_change_mtu (struct net_device * dev , int new_mtu )
@@ -4126,6 +4200,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
4126
4200
err = macsec_offload (ops -> mdo_add_secy , & ctx );
4127
4201
if (err )
4128
4202
goto del_dev ;
4203
+
4204
+ macsec -> insert_tx_tag =
4205
+ macsec_needs_tx_tag (macsec , ops );
4129
4206
}
4130
4207
}
4131
4208
0 commit comments