/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2024, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file ice_lib.h
* @brief header for generic device and sysctl functions
*
* Contains definitions and function declarations for the ice_lib.c file. It
* does not depend on the iflib networking stack.
*/
#ifndef _ICE_LIB_H_
#define _ICE_LIB_H_
/* include kernel options first */
#include "ice_opts.h"
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <sys/bitstring.h>
#include "ice_dcb.h"
#include "ice_type.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_resmgr.h"
#include "ice_rdma_internal.h"
#include "ice_rss.h"
/* Hide debug sysctls unless INVARIANTS is enabled */
#ifdef INVARIANTS
#define ICE_CTLFLAG_DEBUG 0
#else
#define ICE_CTLFLAG_DEBUG CTLFLAG_SKIP
#endif
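/*
 * Illustrative sketch (not part of the driver): a debug-only sysctl would OR
 * ICE_CTLFLAG_DEBUG into its access flags so that it is hidden on kernels
 * built without INVARIANTS. The node name and variable below are examples
 * only:
 *
 *	SYSCTL_ADD_U32(ctx, parent_list, OID_AUTO, "dbg_counter",
 *	    CTLFLAG_RD | ICE_CTLFLAG_DEBUG, &dbg_counter, 0,
 *	    "Example debug-only counter");
 */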
/**
* for_each_set_bit - For loop over each set bit in a bit string
* @bit: storage for the bit index
* @data: address of data block to loop over
* @nbits: maximum number of bits to loop over
*
* macro to create a for loop over a bit string, which runs the body once for
* each bit that is set in the string. The bit variable will be set to the
* index of each set bit in the string, with zero representing the first bit.
*/
#define for_each_set_bit(bit, data, nbits) \
for (bit_ffs((bitstr_t *)(data), (nbits), &(bit)); \
(bit) != -1; \
bit_ffs_at((bitstr_t *)(data), (bit) + 1, (nbits), &(bit)))
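/*
 * Illustrative sketch of typical usage (the bitstring and handler below are
 * hypothetical, not driver symbols):
 *
 *	int b;
 *
 *	for_each_set_bit(b, active_queues, num_queues)
 *		process_queue(b);
 *
 * The loop terminates once bit_ffs_at() finds no further set bits and stores
 * -1 in the index variable.
 */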
/**
* @var broadcastaddr
* @brief broadcast MAC address
*
* constant defining the broadcast MAC address, used for programming the
* broadcast address as a MAC filter for the PF VSI.
*/
static const u8 broadcastaddr[ETHER_ADDR_LEN] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
MALLOC_DECLARE(M_ICE);
extern const char ice_driver_version[];
extern const uint8_t ice_major_version;
extern const uint8_t ice_minor_version;
extern const uint8_t ice_patch_version;
extern const uint8_t ice_rc_version;
/* global sysctl indicating whether the Tx FC filter should be enabled */
extern bool ice_enable_tx_fc_filter;
/* global sysctl indicating whether the Tx LLDP filter should be enabled */
extern bool ice_enable_tx_lldp_filter;
/* global sysctl indicating whether FW health status events should be enabled */
extern bool ice_enable_health_events;
/* global sysctl indicating whether to enable 5-layer scheduler topology */
extern bool ice_tx_balance_en;
/**
* @struct ice_bar_info
* @brief PCI BAR mapping information
*
* Contains data about a PCI BAR that the driver has mapped for use.
*/
struct ice_bar_info {
struct resource *res;
bus_space_tag_t tag;
bus_space_handle_t handle;
bus_size_t size;
int rid;
};
/* Alignment for queues */
#define DBA_ALIGN 128
/* Maximum TSO size is (256K)-1 */
#define ICE_TSO_SIZE ((256*1024) - 1)
/* Minimum size for TSO MSS */
#define ICE_MIN_TSO_MSS 64
#define ICE_MAX_TX_SEGS 8
#define ICE_MAX_TSO_SEGS 128
#define ICE_MAX_DMA_SEG_SIZE ((16*1024) - 1)
#define ICE_MAX_RX_SEGS 5
#define ICE_MAX_TSO_HDR_SEGS 3
#define ICE_MSIX_BAR 3
#define ICE_MAX_MSIX_VECTORS (GLINT_DYN_CTL_MAX_INDEX + 1)
#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
#define ICE_MIN_DESC_COUNT 64
#define ICE_DESC_COUNT_INCR 32
/* List of hardware offloads we support */
#define ICE_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP_SCTP | \
CSUM_IP6_TCP| CSUM_IP6_UDP | CSUM_IP6_SCTP | \
CSUM_IP_TSO | CSUM_IP6_TSO)
/* Macros to decide what kind of hardware offload to enable */
#define ICE_CSUM_TCP (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
#define ICE_CSUM_UDP (CSUM_IP_UDP|CSUM_IP6_UDP)
#define ICE_CSUM_SCTP (CSUM_IP_SCTP|CSUM_IP6_SCTP)
#define ICE_CSUM_IP (CSUM_IP|CSUM_IP_TSO)
/* List of known RX CSUM offload flags */
#define ICE_RX_CSUM_FLAGS (CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | \
CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \
CSUM_COALESCED)
/* List of interface capabilities supported by ice hardware */
#define ICE_FULL_CAPS \
(IFCAP_TSO4 | IFCAP_TSO6 | \
IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \
IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \
	 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO)
/* Safe mode disables support for hardware checksums and TSO */
#define ICE_SAFE_CAPS \
(ICE_FULL_CAPS & ~(IFCAP_HWCSUM | IFCAP_TSO | \
IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM))
#define ICE_CAPS(sc) \
(ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE) ? ICE_SAFE_CAPS : ICE_FULL_CAPS)
/**
* ICE_NVM_ACCESS
* @brief Private ioctl command number for NVM access ioctls
*
 * The ioctl command number used by the NVM update tool to access the driver
 * for NVM access commands.
*/
#define ICE_NVM_ACCESS \
(((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)
/**
* ICE_DEBUG_DUMP
* @brief Private ioctl command number for retrieving debug dump data
*
 * The ioctl command number used by a userspace tool to access the driver and
 * retrieve debug dump data from the firmware.
*/
#define ICE_DEBUG_DUMP \
(((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 6)
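/*
 * Illustrative sketch of how a userspace tool might issue one of these
 * private commands. This assumes the standard ifdrv/SIOCSDRVSPEC mechanism
 * and a driver-defined payload structure; it is not a definitive interface
 * description:
 *
 *	struct ifdrv ifd = { 0 };
 *
 *	strlcpy(ifd.ifd_name, "ice0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = ICE_NVM_ACCESS;
 *	ifd.ifd_len = payload_len;
 *	ifd.ifd_data = payload;
 *	if (ioctl(fd, SIOCSDRVSPEC, &ifd) == -1)
 *		err(1, "ICE_NVM_ACCESS ioctl failed");
 */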
#define ICE_AQ_LEN 1023
#define ICE_MBXQ_LEN 512
#define ICE_SBQ_LEN 512
#define ICE_CTRLQ_WORK_LIMIT 256
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
/* wait up to 50 microseconds for queue state change */
#define ICE_Q_WAIT_RETRY_LIMIT 5
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
ICE_AQ_VSI_UP_TABLE_UP##i##_M)
/*
* For now, set this to the hardware maximum. Each function gets a smaller
* number assigned to it in hw->func_caps.guar_num_vsi, though there
* appears to be no guarantee that is the maximum number that a function
* can use.
*/
#define ICE_MAX_VSI_AVAILABLE 768
/* Maximum size of a single frame (for Tx and Rx) */
#define ICE_MAX_FRAME_SIZE ICE_AQ_SET_MAC_FRAME_SIZE_MAX
/* Maximum MTU size */
#define ICE_MAX_MTU (ICE_MAX_FRAME_SIZE - \
ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
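/*
 * For reference: assuming ICE_AQ_SET_MAC_FRAME_SIZE_MAX is 9728 bytes, the
 * maximum MTU works out to 9728 - 14 - 4 - 4 = 9706 bytes.
 */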
/*
 * Hardware requires that TSO packets have a segment size of at least 64
 * bytes. To avoid sending bad frames to the hardware, the driver forces the
 * MSS of all TSO packets to be at least 64 bytes.
 *
 * However, if the MTU is reduced below a certain size, the forced minimum
 * MSS can result in transmitting segmented frames that are larger than the
 * MTU.
*
* Avoid this by preventing the MTU from being lowered below this limit.
* Alternative solutions require changing the TCP stack to disable offloading
* the segmentation when the requested segment size goes below 64 bytes.
*/
#define ICE_MIN_MTU 112
/*
* The default number of queues reserved for a VF is 4, according to the
* AVF Base Mode specification.
*/
#define ICE_DEFAULT_VF_QUEUES 4
/*
* An invalid VSI number to indicate that mirroring should be disabled.
*/
#define ICE_INVALID_MIRROR_VSI ((u16)-1)
/*
* The maximum number of RX queues allowed per TC in a VSI.
*/
#define ICE_MAX_RXQS_PER_TC 256
/*
* There are three settings that can be updated independently or
* altogether: Link speed, FEC, and Flow Control. These macros allow
* the caller to specify which setting(s) to update.
*/
#define ICE_APPLY_LS BIT(0)
#define ICE_APPLY_FEC BIT(1)
#define ICE_APPLY_FC BIT(2)
#define ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC)
#define ICE_APPLY_LS_FC (ICE_APPLY_LS | ICE_APPLY_FC)
#define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC)
#define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC)
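/*
 * For example, a caller that wants to re-apply all three settings at once
 * would pass the combined mask:
 *
 *	ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
 */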
/*
* Mask of valid flags that can be used as an input for the
* advertise_speed sysctl.
*/
#define ICE_SYSCTL_SPEEDS_VALID_RANGE 0xFFF
/**
* @enum ice_dyn_idx_t
* @brief Dynamic Control ITR indexes
*
 * This enum matches hardware bits and is meant to be used with the DYN_CTLN
 * and QINT registers, or more generally anywhere the manual mentions
 * ITR_INDX. ITR_NONE cannot be used as an index 'n' into any register;
 * instead, it is a special value meaning "don't update" ITR0/1/2.
*/
enum ice_dyn_idx_t {
ICE_IDX_ITR0 = 0,
ICE_IDX_ITR1 = 1,
ICE_IDX_ITR2 = 2,
ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_MAX 8160
/* Define the default Tx and Rx ITR as 50us (translates to ~20k int/sec max) */
#define ICE_DFLT_TX_ITR 50
#define ICE_DFLT_RX_ITR 50
/* RS FEC register values */
#define ICE_RS_FEC_REG_SHIFT 2
#define ICE_RS_FEC_RECV_ID_SHIFT 4
#define ICE_RS_FEC_CORR_LOW_REG_PORT0 (0x02 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_HIGH_REG_PORT0 (0x03 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_LOW_REG_PORT0 (0x04 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT0 (0x05 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_LOW_REG_PORT1 (0x42 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_HIGH_REG_PORT1 (0x43 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_LOW_REG_PORT1 (0x44 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT1 (0x45 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_LOW_REG_PORT2 (0x4A << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_HIGH_REG_PORT2 (0x4B << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_LOW_REG_PORT2 (0x4C << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT2 (0x4D << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_LOW_REG_PORT3 (0x52 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_CORR_HIGH_REG_PORT3 (0x53 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_LOW_REG_PORT3 (0x54 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT3 (0x55 << ICE_RS_FEC_REG_SHIFT)
#define ICE_RS_FEC_RECEIVER_ID_PCS0 (0x33 << ICE_RS_FEC_RECV_ID_SHIFT)
#define ICE_RS_FEC_RECEIVER_ID_PCS1 (0x34 << ICE_RS_FEC_RECV_ID_SHIFT)
/**
* ice_itr_to_reg - Convert an ITR setting into its register equivalent
* @hw: The device HW structure
* @itr_setting: the ITR setting to convert
*
* Based on the hardware ITR granularity, convert an ITR setting into the
* correct value to prepare programming to the HW.
*/
static inline u16 ice_itr_to_reg(struct ice_hw *hw, u16 itr_setting)
{
return itr_setting / hw->itr_gran;
}
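/*
 * For example, assuming a hardware ITR granularity of 2 microseconds, the
 * default 50us Tx setting would be programmed as:
 *
 *	u16 itr = ice_itr_to_reg(hw, ICE_DFLT_TX_ITR);	// 50 / 2 == 25
 */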
/**
* @enum ice_rx_dtype
* @brief DTYPE header split options
*
* This enum matches the Rx context bits to define whether header split is
* enabled or not.
*/
enum ice_rx_dtype {
ICE_RX_DTYPE_NO_SPLIT = 0,
ICE_RX_DTYPE_HEADER_SPLIT = 1,
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
/* Strings used for displaying FEC mode
*
 * Use ice_fec_str() to get these unless they need to be embedded in a
* string constant.
*/
#define ICE_FEC_STRING_AUTO "Auto"
#define ICE_FEC_STRING_RS "RS-FEC"
#define ICE_FEC_STRING_BASER "FC-FEC/BASE-R"
#define ICE_FEC_STRING_NONE "None"
#define ICE_FEC_STRING_DIS_AUTO "Auto (w/ No-FEC)"
/* Strings used for displaying Flow Control mode
*
 * Use ice_fc_str() to get these unless they need to be embedded in a
* string constant.
*/
#define ICE_FC_STRING_FULL "Full"
#define ICE_FC_STRING_TX "Tx"
#define ICE_FC_STRING_RX "Rx"
#define ICE_FC_STRING_NONE "None"
/*
* The number of times the ice_handle_i2c_req function will retry reading
* I2C data via the Admin Queue before returning EBUSY.
*/
#define ICE_I2C_MAX_RETRIES 10
/*
* The Get Link Status AQ command and other link commands can return
* EAGAIN, indicating that the FW Link Management engine is busy.
* Define the number of times that the driver should retry sending these
* commands and the amount of time it should wait between those retries
* (in milliseconds) here.
*/
#define ICE_LINK_AQ_MAX_RETRIES 10
#define ICE_LINK_RETRY_DELAY 17
/*
* The Start LLDP Agent AQ command will fail if it's sent too soon after
* the LLDP agent is stopped. The period between the stop and start
* commands must currently be at least 2 seconds.
*/
#define ICE_START_LLDP_RETRY_WAIT (2 * hz)
/*
* Only certain clusters are valid for certain devices for the FW debug dump
* functionality, so define masks of those here.
*/
#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E810 0x4001AF
#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E830 0x1AF
struct ice_softc;
/**
* @enum ice_rx_cso_stat
* @brief software checksum offload statistics
*
* Enumeration of possible checksum offload statistics captured by software
* during the Rx path.
*/
enum ice_rx_cso_stat {
ICE_CSO_STAT_RX_IP4_ERR,
ICE_CSO_STAT_RX_IP6_ERR,
ICE_CSO_STAT_RX_L3_ERR,
ICE_CSO_STAT_RX_TCP_ERR,
ICE_CSO_STAT_RX_UDP_ERR,
ICE_CSO_STAT_RX_SCTP_ERR,
ICE_CSO_STAT_RX_L4_ERR,
ICE_CSO_STAT_RX_COUNT
};
/**
* @enum ice_tx_cso_stat
* @brief software checksum offload statistics
*
* Enumeration of possible checksum offload statistics captured by software
* during the Tx path.
*/
enum ice_tx_cso_stat {
ICE_CSO_STAT_TX_TCP,
ICE_CSO_STAT_TX_UDP,
ICE_CSO_STAT_TX_SCTP,
ICE_CSO_STAT_TX_IP4,
ICE_CSO_STAT_TX_IP6,
ICE_CSO_STAT_TX_L3_ERR,
ICE_CSO_STAT_TX_L4_ERR,
ICE_CSO_STAT_TX_COUNT
};
/**
* @struct tx_stats
* @brief software Tx statistics
*
* Contains software counted Tx statistics for a single queue
*/
struct tx_stats {
/* Soft Stats */
u64 tx_bytes;
u64 tx_packets;
u64 mss_too_small;
u64 tso;
u64 cso[ICE_CSO_STAT_TX_COUNT];
};
/**
* @struct rx_stats
* @brief software Rx statistics
*
* Contains software counted Rx statistics for a single queue
*/
struct rx_stats {
/* Soft Stats */
u64 rx_packets;
u64 rx_bytes;
u64 desc_errs;
u64 cso[ICE_CSO_STAT_RX_COUNT];
};
/**
* @struct ice_vsi_hw_stats
* @brief hardware statistics for a VSI
*
* Stores statistics that are generated by hardware for a VSI.
*/
struct ice_vsi_hw_stats {
struct ice_eth_stats prev;
struct ice_eth_stats cur;
bool offsets_loaded;
};
/**
* @struct ice_pf_hw_stats
* @brief hardware statistics for a PF
*
* Stores statistics that are generated by hardware for each PF.
*/
struct ice_pf_hw_stats {
struct ice_hw_port_stats prev;
struct ice_hw_port_stats cur;
bool offsets_loaded;
};
/**
* @struct ice_pf_sw_stats
* @brief software statistics for a PF
*
* Contains software generated statistics relevant to a PF.
*/
struct ice_pf_sw_stats {
/* # of reset events handled, by type */
u32 corer_count;
u32 globr_count;
u32 empr_count;
u32 pfr_count;
/* # of detected MDD events for Tx and Rx */
u32 tx_mdd_count;
u32 rx_mdd_count;
	u64 rx_roc_error; /* port oversize packet stats, error_cnt
			     from GLV_REPC VSI register + RxOversize */
};
/**
* @struct ice_tc_info
* @brief Traffic class information for a VSI
*
* Stores traffic class information used in configuring
* a VSI.
*/
struct ice_tc_info {
u16 qoffset; /* Offset in VSI queue space */
u16 qcount_tx; /* TX queues for this Traffic Class */
u16 qcount_rx; /* RX queues */
};
/**
* @struct ice_vsi
* @brief VSI structure
*
* Contains data relevant to a single VSI
*/
struct ice_vsi {
/* back pointer to the softc */
struct ice_softc *sc;
bool dynamic; /* if true, dynamically allocated */
enum ice_vsi_type type; /* type of this VSI */
u16 idx; /* software index to sc->all_vsi[] */
u16 *tx_qmap; /* Tx VSI to PF queue mapping */
u16 *rx_qmap; /* Rx VSI to PF queue mapping */
enum ice_resmgr_alloc_type qmap_type;
struct ice_tx_queue *tx_queues; /* Tx queue array */
struct ice_rx_queue *rx_queues; /* Rx queue array */
int num_tx_queues;
int num_rx_queues;
int num_vectors;
int16_t rx_itr;
int16_t tx_itr;
/* RSS configuration */
u16 rss_table_size; /* HW RSS table size */
u8 rss_lut_type; /* Used to configure Get/Set RSS LUT AQ call */
int max_frame_size;
u16 mbuf_sz;
struct ice_aqc_vsi_props info;
/* DCB configuration */
u8 num_tcs; /* Total number of enabled TCs */
u16 tc_map; /* bitmap of enabled Traffic Classes */
/* Information for each traffic class */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
/* context for per-VSI sysctls */
struct sysctl_ctx_list ctx;
struct sysctl_oid *vsi_node;
/* context for per-txq sysctls */
struct sysctl_ctx_list txqs_ctx;
struct sysctl_oid *txqs_node;
/* context for per-rxq sysctls */
struct sysctl_ctx_list rxqs_ctx;
struct sysctl_oid *rxqs_node;
/* VSI-level stats */
struct ice_vsi_hw_stats hw_stats;
/* VSI mirroring details */
u16 mirror_src_vsi;
u16 rule_mir_ingress;
u16 rule_mir_egress;
#ifdef PCI_IOV
u8 vf_num; /* Index of owning VF, if applicable */
#endif
};
/**
* @struct ice_debug_dump_cmd
* @brief arguments/return value for debug dump ioctl
*/
struct ice_debug_dump_cmd {
u32 offset; /* offset to read/write from table, in bytes */
u16 cluster_id; /* also used to get next cluster id */
u16 table_id;
u16 data_size; /* size of data field, in bytes */
u16 reserved1;
u32 reserved2;
u8 data[];
};
/**
* @struct ice_serdes_equalization
* @brief serdes equalization info
*/
struct ice_serdes_equalization {
int rx_equalization_pre1;
int rx_equalization_pre2;
int rx_equalization_post1;
int rx_equalization_bflf;
int rx_equalization_bfhf;
int rx_equalization_drate;
int tx_equalization_pre1;
int tx_equalization_pre2;
int tx_equalization_pre3;
int tx_equalization_atten;
int tx_equalization_post1;
};
/**
* @struct ice_fec_stats_to_sysctl
* @brief FEC stats register value of port
*/
struct ice_fec_stats_to_sysctl {
u16 fec_corr_cnt_low;
u16 fec_corr_cnt_high;
u16 fec_uncorr_cnt_low;
u16 fec_uncorr_cnt_high;
};
#define ICE_MAX_SERDES_LANE_COUNT 4
/**
* @struct ice_regdump_to_sysctl
* @brief PHY stats of port
*/
struct ice_regdump_to_sysctl {
	/* A multilane port can have at most 4 serdes lanes */
struct ice_serdes_equalization equalization[ICE_MAX_SERDES_LANE_COUNT];
struct ice_fec_stats_to_sysctl stats;
};
/**
* @struct ice_port_topology
 * @brief Port topology from lport, i.e. serdes mapping, pcsquad, macport, cage
*/
struct ice_port_topology {
u16 pcs_port;
u16 primary_serdes_lane;
u16 serdes_lane_count;
u16 pcs_quad_select;
};
/**
* @enum ice_state
* @brief Driver state flags
*
* Used to indicate the status of various driver events. Intended to be
* modified only using atomic operations, so that we can use it even in places
* which aren't locked.
*/
enum ice_state {
ICE_STATE_CONTROLQ_EVENT_PENDING,
ICE_STATE_VFLR_PENDING,
ICE_STATE_MDD_PENDING,
ICE_STATE_RESET_OICR_RECV,
ICE_STATE_RESET_PFR_REQ,
ICE_STATE_PREPARED_FOR_RESET,
ICE_STATE_SUBIF_NEEDS_REINIT,
ICE_STATE_RESET_FAILED,
ICE_STATE_DRIVER_INITIALIZED,
ICE_STATE_NO_MEDIA,
ICE_STATE_RECOVERY_MODE,
ICE_STATE_ROLLBACK_MODE,
ICE_STATE_LINK_STATUS_REPORTED,
ICE_STATE_ATTACHING,
ICE_STATE_DETACHING,
ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
ICE_STATE_MULTIPLE_TCS,
ICE_STATE_DO_FW_DEBUG_DUMP,
ICE_STATE_LINK_ACTIVE_ON_DOWN,
ICE_STATE_FIRST_INIT_LINK,
ICE_STATE_DO_CREATE_MIRR_INTFC,
ICE_STATE_DO_DESTROY_MIRR_INTFC,
ICE_STATE_PHY_FW_INIT_PENDING,
/* This entry must be last */
ICE_STATE_LAST,
};
/* Functions for setting and checking driver state. Note the functions take
* bit positions, not bitmasks. The atomic_testandset_32 and
* atomic_testandclear_32 operations require bit positions, while the
* atomic_set_32 and atomic_clear_32 require bitmasks. This can easily lead to
 * programming errors, so we provide wrapper functions to avoid them.
*/
/**
* ice_set_state - Set the specified state
* @s: the state bitmap
* @bit: the state to set
*
* Atomically update the state bitmap with the specified bit set.
*/
static inline void
ice_set_state(volatile u32 *s, enum ice_state bit)
{
/* atomic_set_32 expects a bitmask */
atomic_set_32(s, BIT(bit));
}
/**
* ice_clear_state - Clear the specified state
* @s: the state bitmap
* @bit: the state to clear
*
* Atomically update the state bitmap with the specified bit cleared.
*/
static inline void
ice_clear_state(volatile u32 *s, enum ice_state bit)
{
/* atomic_clear_32 expects a bitmask */
atomic_clear_32(s, BIT(bit));
}
/**
* ice_testandset_state - Test and set the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* Atomically update the state bitmap, setting the specified bit. Returns the
* previous value of the bit.
*/
static inline u32
ice_testandset_state(volatile u32 *s, enum ice_state bit)
{
/* atomic_testandset_32 expects a bit position */
return atomic_testandset_32(s, bit);
}
/**
* ice_testandclear_state - Test and clear the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* Atomically update the state bitmap, clearing the specified bit. Returns the
* previous value of the bit.
*/
static inline u32
ice_testandclear_state(volatile u32 *s, enum ice_state bit)
{
/* atomic_testandclear_32 expects a bit position */
return atomic_testandclear_32(s, bit);
}
/**
* ice_test_state - Test the specified state
* @s: the state bitmap
* @bit: the bit to test
*
* Return true if the state is set, false otherwise. Use this only if the flow
* does not need to update the state. If you must update the state as well,
* prefer ice_testandset_state or ice_testandclear_state.
*/
static inline u32
ice_test_state(volatile u32 *s, enum ice_state bit)
{
return (*s & BIT(bit)) ? true : false;
}
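/*
 * Illustrative sketch of the intended usage pattern for the state helpers
 * above, assuming a softc with a "state" bitmap word (the event chosen here
 * is just an example):
 *
 *	ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
 *	...
 *	if (ice_testandclear_state(&sc->state,
 *	    ICE_STATE_CONTROLQ_EVENT_PENDING))
 *		ice_process_ctrlq(sc, ICE_CTL_Q_ADMIN, &pending);
 */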
/**
* @struct ice_str_buf
* @brief static length buffer for string returning
*
* Structure containing a fixed size string buffer, used to implement
* numeric->string conversion functions that may want to return non-constant
* strings.
*
* This allows returning a fixed size string that is generated by a conversion
 * function, and then copied to wherever it is used, without needing an
* explicit local variable passed by reference.
*/
struct ice_str_buf {
char str[ICE_STR_BUF_LEN];
};
struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err);
struct ice_str_buf _ice_status_str(int status);
struct ice_str_buf _ice_err_str(int err);
struct ice_str_buf _ice_fltr_flag_str(u16 flag);
struct ice_str_buf _ice_log_sev_str(u8 log_level);
struct ice_str_buf _ice_mdd_tx_tclan_str(u8 event);
struct ice_str_buf _ice_mdd_tx_pqm_str(u8 event);
struct ice_str_buf _ice_mdd_rx_str(u8 event);
struct ice_str_buf _ice_fw_lldp_status(u32 lldp_status);
#define ice_aq_str(err) _ice_aq_str(err).str
#define ice_status_str(err) _ice_status_str(err).str
#define ice_err_str(err) _ice_err_str(err).str
#define ice_fltr_flag_str(flag) _ice_fltr_flag_str(flag).str
#define ice_mdd_tx_tclan_str(event) _ice_mdd_tx_tclan_str(event).str
#define ice_mdd_tx_pqm_str(event) _ice_mdd_tx_pqm_str(event).str
#define ice_mdd_rx_str(event) _ice_mdd_rx_str(event).str
#define ice_log_sev_str(log_level) _ice_log_sev_str(log_level).str
#define ice_fw_lldp_status(lldp_status) _ice_fw_lldp_status(lldp_status).str
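/*
 * Because the temporary ice_str_buf returned by the underlying function lives
 * until the end of the full expression, these wrappers can be used directly
 * in logging calls, e.g. (illustrative only):
 *
 *	device_printf(dev, "AQ command failed: %s aq_err %s\n",
 *	    ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
 */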
/**
* ice_enable_intr - Enable interrupts for given vector
* @hw: the device private HW structure
* @vector: the interrupt index in PF space
*
* In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
*/
static inline void
ice_enable_intr(struct ice_hw *hw, int vector)
{
u32 dyn_ctl;
/* Use ITR_NONE so that ITR configuration is not changed. */
dyn_ctl = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
}
/**
* ice_disable_intr - Disable interrupts for given vector
* @hw: the device private HW structure
* @vector: the interrupt index in PF space
*
* In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
*/
static inline void
ice_disable_intr(struct ice_hw *hw, int vector)
{
u32 dyn_ctl;
/* Use ITR_NONE so that ITR configuration is not changed. */
dyn_ctl = ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S;
wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
}
/**
* ice_is_tx_desc_done - determine if a Tx descriptor is done
* @txd: the Tx descriptor to check
*
* Returns true if hardware is done with a Tx descriptor and software is
* capable of re-using it.
*/
static inline bool
ice_is_tx_desc_done(struct ice_tx_desc *txd)
{
return (((txd->cmd_type_offset_bsz & ICE_TXD_QW1_DTYPE_M)
>> ICE_TXD_QW1_DTYPE_S) == ICE_TX_DESC_DTYPE_DESC_DONE);
}
/**
* ice_get_pf_id - Get the PF id from the hardware registers
* @hw: the ice hardware structure
*
* Reads the PF_FUNC_RID register and extracts the function number from it.
* Intended to be used in cases where hw->pf_id hasn't yet been assigned by
* ice_init_hw.
*
* @pre this function should be called only after PCI register access has been
* setup, and prior to ice_init_hw. After hardware has been initialized, the
* cached hw->pf_id value can be used.
*/
static inline u8
ice_get_pf_id(struct ice_hw *hw)
{
return (u8)((rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNCTION_NUMBER_M) >>
PF_FUNC_RID_FUNCTION_NUMBER_S);
}
/* Details of how to re-initialize depend on the networking stack */
void ice_request_stack_reinit(struct ice_softc *sc);
/* Details of how to check if the network stack is detaching us */
bool ice_driver_is_detaching(struct ice_softc *sc);
/* Details of how to setup/teardown a mirror interface */
/**
* @brief Create an interface for mirroring
*/
int ice_create_mirror_interface(struct ice_softc *sc);
/**
* @brief Destroy created mirroring interface
*/
void ice_destroy_mirror_interface(struct ice_softc *sc);
const char * ice_fw_module_str(enum ice_aqc_fw_logging_mod module);
void ice_add_fw_logging_tunables(struct ice_softc *sc,
struct sysctl_oid *parent);
void ice_handle_fw_log_event(struct ice_softc *sc, struct ice_aq_desc *desc,
void *buf);
int ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending);
int ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num);
void ice_free_bar(device_t dev, struct ice_bar_info *bar);
void ice_set_ctrlq_len(struct ice_hw *hw);
void ice_release_vsi(struct ice_vsi *vsi);
struct ice_vsi *ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type);
void ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
const int max_rx_queues);
void ice_free_vsi_qmaps(struct ice_vsi *vsi);
int ice_initialize_vsi(struct ice_vsi *vsi);
void ice_deinit_vsi(struct ice_vsi *vsi);
uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi);
int ice_get_phy_type_low(uint64_t phy_type_low);
int ice_get_phy_type_high(uint64_t phy_type_high);
int ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
void ice_configure_rxq_interrupt(struct ice_hw *hw, u16 rxqid, u16 vector, u8 itr_idx);
void ice_configure_all_rxq_interrupts(struct ice_vsi *vsi);
void ice_configure_txq_interrupt(struct ice_hw *hw, u16 txqid, u16 vector, u8 itr_idx);
void ice_configure_all_txq_interrupts(struct ice_vsi *vsi);
void ice_flush_rxq_interrupts(struct ice_vsi *vsi);
void ice_flush_txq_interrupts(struct ice_vsi *vsi);
int ice_cfg_vsi_for_tx(struct ice_vsi *vsi);
int ice_cfg_vsi_for_rx(struct ice_vsi *vsi);
int ice_control_rx_queue(struct ice_vsi *vsi, u16 qidx, bool enable);
int ice_control_all_rx_queues(struct ice_vsi *vsi, bool enable);
int ice_cfg_pf_default_mac_filters(struct ice_softc *sc);
int ice_rm_pf_default_mac_filters(struct ice_softc *sc);
void ice_print_nvm_version(struct ice_softc *sc);
void ice_update_vsi_hw_stats(struct ice_vsi *vsi);
void ice_reset_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_softc *sc);
void ice_reset_pf_stats(struct ice_softc *sc);
void ice_add_device_sysctls(struct ice_softc *sc);
void ice_log_hmc_error(struct ice_hw *hw, device_t dev);
void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
struct ice_eth_stats *stats);
void ice_add_vsi_sysctls(struct ice_vsi *vsi);
void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
struct ice_softc *sc);
void ice_configure_misc_interrupts(struct ice_softc *sc);
int ice_sync_multicast_filters(struct ice_softc *sc);
int ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
u16 length);
int ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
int ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
u16 length);
int ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent);
void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi);
void ice_add_device_tunables(struct ice_softc *sc);
int ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr);
int ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr);
int ice_vsi_disable_tx(struct ice_vsi *vsi);
void ice_vsi_add_txqs_ctx(struct ice_vsi *vsi);
void ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi);
void ice_vsi_del_txqs_ctx(struct ice_vsi *vsi);
void ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi);
void ice_add_txq_sysctls(struct ice_tx_queue *txq);
void ice_add_rxq_sysctls(struct ice_rx_queue *rxq);
int ice_config_rss(struct ice_vsi *vsi);
void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc);
int ice_load_pkg_file(struct ice_softc *sc);
void ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status);
uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter);
void ice_save_pci_info(struct ice_hw *hw, device_t dev);
int ice_replay_all_vsi_cfg(struct ice_softc *sc);
void ice_link_up_msg(struct ice_softc *sc);
int ice_update_laa_mac(struct ice_softc *sc);
void ice_get_and_print_bus_info(struct ice_softc *sc);
const char *ice_fec_str(enum ice_fec_mode mode);
const char *ice_fc_str(enum ice_fc_mode mode);
const char *ice_fwd_act_str(enum ice_sw_fwd_act_type action);
const char *ice_state_to_str(enum ice_state state);
int ice_init_link_events(struct ice_softc *sc);
void ice_configure_rx_itr(struct ice_vsi *vsi);
void ice_configure_tx_itr(struct ice_vsi *vsi);
void ice_setup_pf_vsi(struct ice_softc *sc);
void ice_handle_mdd_event(struct ice_softc *sc);
void ice_init_dcb_setup(struct ice_softc *sc);
int ice_send_version(struct ice_softc *sc);
int ice_cfg_pf_ethertype_filters(struct ice_softc *sc);
void ice_init_link_configuration(struct ice_softc *sc);
void ice_init_saved_phy_cfg(struct ice_softc *sc);
int ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings);
void ice_set_link_management_mode(struct ice_softc *sc);
int ice_module_event_handler(module_t mod, int what, void *arg);
int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req);
int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length);
int ice_alloc_intr_tracking(struct ice_softc *sc);
void ice_free_intr_tracking(struct ice_softc *sc);
void ice_set_default_local_lldp_mib(struct ice_softc *sc);
void ice_set_link(struct ice_softc *sc, bool enabled);
void ice_add_rx_lldp_filter(struct ice_softc *sc);
void ice_init_health_events(struct ice_softc *sc);
void ice_cfg_pba_num(struct ice_softc *sc);
int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg);
void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib);
int ice_setup_vsi_mirroring(struct ice_vsi *vsi);
#endif /* _ICE_LIB_H_ */