/*	$OpenBSD: if_iwm.c,v 1.414 2024/02/16 11:44:52 stsp Exp $	*/

/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

//#include "bpfilter.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/endian.h>

#include <sys/refcnt.h>
#include <sys/task.h>
#include <machine/bus.h>
//#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
//#include <dev/pci/pcidevs.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_ra.h>
#include <net80211/ieee80211_ra_vht.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
#undef DPRINTF /* defined in ieee80211_priv.h */

#ifdef __FreeBSD_version
#include <sys/device.h>
#include <net/ifq.h>
#define DEVNAME(_s) gDriverName
#define SC_DEV_FOR_PCI sc->sc_dev
#else
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
#endif

#define IC2IFP(_ic_) (&(_ic_)->ic_if)

#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 1;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

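/*
 * Map of legacy rates to HT rates used when filling firmware rate/PLCP
 * values. The "rate" field is in units of 500 kbit/s, so 2 means 1 Mbit/s.
 * IWM_RATE_INVM_PLCP and IWM_RATE_HT_SISO_MCS_INV_PLCP mark entries which
 * have no legacy or HT equivalent, respectively.
 */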
const struct iwm_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_8_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_9_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_10_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_11_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_12_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_13_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_14_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)

/* Convert an MCS index into an iwm_rates[] index. */
const int iwm_ht_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
	IWM_RATE_MCS_8_INDEX,
	IWM_RATE_MCS_9_INDEX,
	IWM_RATE_MCS_10_INDEX,
	IWM_RATE_MCS_11_INDEX,
	IWM_RATE_MCS_12_INDEX,
	IWM_RATE_MCS_13_INDEX,
	IWM_RATE_MCS_14_INDEX,
	IWM_RATE_MCS_15_INDEX,
};

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

int	iwm_is_mimo_ht_plcp(uint8_t);
int	iwm_is_mimo_ht_mcs(int);
int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
	    uint8_t *, size_t);
int	iwm_set_default_calib(struct iwm_softc *, const void *);
void	iwm_fw_info_free(struct iwm_fw_info *);
void	iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
int	iwm_read_firmware(struct iwm_softc *);
uint32_t iwm_read_prph_unlocked(struct iwm_softc *, uint32_t);
uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
void	iwm_write_prph_unlocked(struct iwm_softc *, uint32_t, uint32_t);
void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
int	iwm_nic_lock(struct iwm_softc *);
void	iwm_nic_assert_locked(struct iwm_softc *);
void	iwm_nic_unlock(struct iwm_softc *);
int	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
	    uint32_t);
int	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
	    bus_size_t);
void	iwm_dma_contig_free(struct iwm_dma_info *);
int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_disable_rx_dma(struct iwm_softc *);
void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_enable_rfkill_int(struct iwm_softc *);
int	iwm_check_rfkill(struct iwm_softc *);
void	iwm_enable_interrupts(struct iwm_softc *);
void	iwm_enable_fwload_interrupt(struct iwm_softc *);
void	iwm_restore_interrupts(struct iwm_softc *);
void	iwm_disable_interrupts(struct iwm_softc *);
void	iwm_ict_reset(struct iwm_softc *);
int	iwm_set_hw_ready(struct iwm_softc *);
int	iwm_prepare_card_hw(struct iwm_softc *);
void	iwm_apm_config(struct iwm_softc *);
int	iwm_apm_init(struct iwm_softc *);
void	iwm_apm_stop(struct iwm_softc *);
int	iwm_allow_mcast(struct iwm_softc *);
void	iwm_init_msix_hw(struct iwm_softc *);
void	iwm_conf_msix_hw(struct iwm_softc *, int);
int	iwm_clear_persistence_bit(struct iwm_softc *);
int	iwm_start_hw(struct iwm_softc *);
void	iwm_stop_device(struct iwm_softc *);
void	iwm_nic_config(struct iwm_softc *);
int	iwm_nic_rx_init(struct iwm_softc *);
int	iwm_nic_rx_legacy_init(struct iwm_softc *);
int	iwm_nic_rx_mq_init(struct iwm_softc *);
int	iwm_nic_tx_init(struct iwm_softc *);
int	iwm_nic_init(struct iwm_softc *);
int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
int	iwm_enable_txq(struct iwm_softc *, int, int, int, int, uint8_t,
	    uint16_t);
int	iwm_disable_txq(struct iwm_softc *, int, int, uint8_t);
int	iwm_post_alive(struct iwm_softc *);
struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
	    uint16_t);
int	iwm_phy_db_set_section(struct iwm_softc *,
	    struct iwm_calib_res_notif_phy_db *);
int	iwm_is_valid_channel(uint16_t);
uint8_t	iwm_ch_id_to_ch_index(uint16_t);
uint16_t iwm_channel_id_to_papd(uint16_t);
uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
	    uint16_t *, uint16_t);
int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
	    uint8_t);
int	iwm_send_phy_db_data(struct iwm_softc *);
void	iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
	    uint32_t);
void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
	    uint8_t *, uint16_t *);
int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
	    uint16_t *, size_t);
uint8_t	iwm_fw_valid_tx_ant(struct iwm_softc *);
uint8_t	iwm_fw_valid_rx_ant(struct iwm_softc *);
int	iwm_valid_siso_ant_rate_mask(struct iwm_softc *);
void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
	    const uint8_t *nvm_channels, int nchan);
int	iwm_mimo_enabled(struct iwm_softc *);
void	iwm_setup_ht_rates(struct iwm_softc *);
void	iwm_setup_vht_rates(struct iwm_softc *);
void	iwm_mac_ctxt_task(void *);
void	iwm_phy_ctxt_task(void *);
void	iwm_updateprot(struct ieee80211com *);
void	iwm_updateslot(struct ieee80211com *);
void	iwm_updateedca(struct ieee80211com *);
void	iwm_updatechan(struct ieee80211com *);
void	iwm_updatedtim(struct ieee80211com *);
void	iwm_init_reorder_buffer(struct iwm_reorder_buffer *, uint16_t,
	    uint16_t);
void	iwm_clear_reorder_buffer(struct iwm_softc *, struct iwm_rxba_data *);
int	iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_rx_ba_session_expired(void *);
void	iwm_reorder_timer_expired(void *);
int	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
	    uint16_t, uint16_t, int, int);
int	iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
	    uint8_t);
void	iwm_ba_task(void *);

int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, const uint16_t *,
	    const uint16_t *, int);
void	iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
	    const uint16_t *, const uint16_t *);
int	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
int	iwm_nvm_init(struct iwm_softc *);
int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
	    uint32_t);
int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
	    int, int *);
int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
int	iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
int	iwm_send_dqa_cmd(struct iwm_softc *);
int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
int	iwm_config_ltr(struct iwm_softc *);
int	iwm_rx_addbuf(struct iwm_softc *, int, int);
int	iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
int	iwm_rxmq_get_signal_strength(struct iwm_softc *, struct iwm_rx_mpdu_desc *);
void	iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
int	iwm_rx_hwdecrypt(struct iwm_softc *, struct mbuf *, uint32_t,
	    struct ieee80211_rxinfo *);
int	iwm_ccmp_decap(struct iwm_softc *, struct mbuf *,
	    struct ieee80211_node *, struct ieee80211_rxinfo *);
void	iwm_rx_frame(struct iwm_softc *, struct mbuf *, int, uint32_t, int, int,
	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwm_ht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    int, uint8_t, int);
void	iwm_vht_single_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    int, int, uint8_t, int);
void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_node *, int, int);
void	iwm_txd_done(struct iwm_softc *, struct iwm_tx_data *);
void	iwm_txq_advance(struct iwm_softc *, struct iwm_tx_ring *, int);
void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
void	iwm_clear_oactive(struct iwm_softc *, struct iwm_tx_ring *);
void	iwm_ampdu_rate_control(struct iwm_softc *, struct ieee80211_node *,
	    struct iwm_tx_ring *, int, uint16_t, uint16_t);
void	iwm_rx_compressed_ba(struct iwm_softc *, struct iwm_rx_packet *);
void	iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
uint8_t	iwm_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
int	iwm_phy_ctxt_cmd_uhb(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
void	iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint8_t, uint8_t);
int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
	    const void *);
int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
	    uint32_t *);
int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
void	iwm_cmd_done(struct iwm_softc *, int, int, int);
void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
void	iwm_reset_sched(struct iwm_softc *, int, int, uint8_t);
uint8_t	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
	    struct ieee80211_frame *, struct iwm_tx_cmd *);
int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
int	iwm_flush_tx_path(struct iwm_softc *, int);
int	iwm_wait_tx_queues_empty(struct iwm_softc *);
void	iwm_led_enable(struct iwm_softc *);
void	iwm_led_disable(struct iwm_softc *);
int	iwm_led_is_enabled(struct iwm_softc *);
void	iwm_led_blink_timeout(void *);
void	iwm_led_blink_start(struct iwm_softc *);
void	iwm_led_blink_stop(struct iwm_softc *);
int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
	    struct iwm_beacon_filter_cmd *);
void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
	    struct iwm_beacon_filter_cmd *);
int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_power_cmd *);
int	iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
int	iwm_power_update_device(struct iwm_softc *);
int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
int	iwm_disable_beacon_filter(struct iwm_softc *);
int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
int	iwm_add_aux_sta(struct iwm_softc *);
int	iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
int	iwm_flush_sta(struct iwm_softc *, struct iwm_node *);
int	iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
uint16_t iwm_scan_rx_chain(struct iwm_softc *);
uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
	    struct iwm_scan_channel_cfg_lmac *, int, int);
int	iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
int	iwm_lmac_scan(struct iwm_softc *, int);
int	iwm_config_umac_scan(struct iwm_softc *);
int	iwm_umac_scan(struct iwm_softc *, int);
void	iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
int	iwm_rval2ridx(int);
void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_ctx_cmd *, uint32_t);
void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_data_sta *, int);
int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
void	iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
void	iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
int	iwm_scan(struct iwm_softc *);
int	iwm_bgscan(struct ieee80211com *);
void	iwm_bgscan_done(struct ieee80211com *,
	    struct ieee80211_node_switch_bss_arg *, size_t);
void	iwm_bgscan_done_task(void *);
int	iwm_umac_scan_abort(struct iwm_softc *);
int	iwm_lmac_scan_abort(struct iwm_softc *);
int	iwm_scan_abort(struct iwm_softc *);
int	iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
	    uint8_t);
int	iwm_auth(struct iwm_softc *);
int	iwm_deauth(struct iwm_softc *);
int	iwm_run(struct iwm_softc *);
int	iwm_run_stop(struct iwm_softc *);
struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
int	iwm_set_key_v1(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
int	iwm_set_key(struct ieee80211com *, struct ieee80211_node *,
	    struct ieee80211_key *);
void	iwm_delete_key_v1(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
void	iwm_delete_key(struct ieee80211com *,
	    struct ieee80211_node *, struct ieee80211_key *);
void	iwm_calib_timeout(void *);
void	iwm_set_rate_table_vht(struct iwm_node *, struct iwm_lq_cmd *);
void	iwm_set_rate_table(struct iwm_node *, struct iwm_lq_cmd *);
void	iwm_setrates(struct iwm_node *, int);
int	iwm_media_change(struct ifnet *);
void	iwm_newstate_task(void *);
int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
void	iwm_endscan(struct iwm_softc *);
void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
	    struct ieee80211_node *);
int	iwm_sf_config(struct iwm_softc *, int);
int	iwm_send_bt_init_conf(struct iwm_softc *);
int	iwm_send_soc_conf(struct iwm_softc *);
int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
int	iwm_send_temp_report_ths_cmd(struct iwm_softc *);
void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
void	iwm_free_fw_paging(struct iwm_softc *);
int	iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
int	iwm_init_hw(struct iwm_softc *);
int	iwm_init(struct ifnet *);
void	iwm_start(struct ifnet *);
void	iwm_stop(struct ifnet *);
void	iwm_watchdog(struct ifnet *);
int	iwm_ioctl(struct ifnet *, u_long, caddr_t);
const char *iwm_desc_lookup(uint32_t);
void	iwm_nic_error(struct iwm_softc *);
void	iwm_dump_driver_status(struct iwm_softc *);
void	iwm_nic_umac_error(struct iwm_softc *);
void	iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
void	iwm_flip_address(uint8_t *);
int	iwm_detect_duplicate(struct iwm_softc *, struct mbuf *,
	    struct iwm_rx_mpdu_desc *, struct ieee80211_rxinfo *);
int	iwm_is_sn_less(uint16_t, uint16_t, uint16_t);
void	iwm_release_frames(struct iwm_softc *, struct ieee80211_node *,
	    struct iwm_rxba_data *, struct iwm_reorder_buffer *, uint16_t,
	    struct mbuf_list *);
int	iwm_oldsn_workaround(struct iwm_softc *, struct ieee80211_node *,
	    int, struct iwm_reorder_buffer *, uint32_t, uint32_t);
int	iwm_rx_reorder(struct iwm_softc *, struct mbuf *, int,
	    struct iwm_rx_mpdu_desc *, int, int, uint32_t,
	    struct ieee80211_rxinfo *, struct mbuf_list *);
void	iwm_rx_mpdu_mq(struct iwm_softc *, struct mbuf *, void *, size_t,
	    struct mbuf_list *);
int	iwm_rx_pkt_valid(struct iwm_rx_packet *);
void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *,
	    struct mbuf_list *);
void	iwm_notif_intr(struct iwm_softc *);
int	iwm_intr(void *);
int	iwm_intr_msix(void *);
int	iwm_match(struct device *, void *, void *);
int	iwm_preinit(struct iwm_softc *);
void	iwm_attach_hook(struct device *);
//void	iwm_attach(struct device *, struct device *, void *);
void	iwm_init_task(void *);
int	iwm_activate(struct device *, int);
void	iwm_resume(struct iwm_softc *);
int	iwm_wakeup(struct iwm_softc *);

#if NBPFILTER > 0
void	iwm_radiotap_attach(struct iwm_softc *);
#endif

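/*
 * Look up the command version advertised by the loaded firmware via the
 * IWM_UCODE_TLV_CMD_VERSIONS TLV for a given command group and opcode.
 * Returns IWM_FW_CMD_VER_UNKNOWN if the firmware did not provide one.
 */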
uint8_t
iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
{
	const struct iwm_fw_cmd_version *entry;
	int i;

	for (i = 0; i < sc->n_cmd_versions; i++) {
		entry = &sc->cmd_versions[i];
		if (entry->group == grp && entry->cmd == cmd)
			return entry->cmd_ver;
	}

	return IWM_FW_CMD_VER_UNKNOWN;
}

int
iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
{
	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
}

int
iwm_is_mimo_ht_mcs(int mcs)
{
	int ridx = iwm_ht_mcs2ridx[mcs];
	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
}

int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS	40
/* Newer firmware might support more channels. Raise this value if needed. */
#define IWM_MAX_SCAN_CHANNELS		52 /* as of 8265-34 firmware image */

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

void
iwm_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	/*
	 * Starting with major version 35 the Linux driver prints the minor
	 * version in hexadecimal.
	 */
	if (major >= 35)
		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
	else
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
}

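/*
 * Read the firmware image via loadfirmware(9) and parse its TLV list,
 * recording ucode sections, capability and API flags, and command versions
 * in the softc. Concurrent callers sleep until the first caller is done
 * parsing; the result is cached in fw->fw_status.
 */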
int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	uint32_t tlv_type;
	uint8_t *data;
	uint32_t usniffer_img;
	uint32_t paging_mem_size;
	int err;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_rawdata != NULL)
		iwm_fw_info_free(fw);

	err = loadfirmware(sc->sc_fwname,
	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
	if (err) {
		printf("%s: could not read firmware %s (error %d)\n",
		    DEVNAME(sc), sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
	sc->n_cmd_versions = 0;

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		printf("%s: invalid firmware %s\n",
		    DEVNAME(sc), sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));

	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			printf("%s: firmware too short: %zu bytes\n",
			    DEVNAME(sc), len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			int idx, i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(api->api_flags) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_CMD_VERSIONS:
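			/*
			 * Round a misaligned TLV length down to a whole
			 * number of entries; a trailing partial entry is
			 * ignored.
			 */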
			if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
				tlv_len /= sizeof(struct iwm_fw_cmd_version);
				tlv_len *= sizeof(struct iwm_fw_cmd_version);
			}
			if (sc->n_cmd_versions != 0) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len > sizeof(sc->cmd_versions)) {
				err = EINVAL;
				goto parse_out;
			}
			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
			sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
			break;

		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);

			DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
			    DEVNAME(sc), paging_mem_size));
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				printf("%s: Driver only supports up to %u"
				    " bytes for paging image (%u requested)\n",
				    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
				    paging_mem_size);
				err = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				printf("%s: Paging: image isn't multiple of %u\n",
				    DEVNAME(sc), IWM_FW_PAGING_SIZE);
				err = EINVAL;
				goto out;
			}

			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
			fw->fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
				err = ERANGE;
				goto parse_out;
			}
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}

			iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_DBG_DEST:
		case IWM_UCODE_TLV_FW_DBG_CONF:
		case IWM_UCODE_TLV_UMAC_DEBUG_ADDRS:
		case IWM_UCODE_TLV_LMAC_DEBUG_ADDRS:
		case IWM_UCODE_TLV_TYPE_DEBUG_INFO:
		case IWM_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
		case IWM_UCODE_TLV_TYPE_HCMD:
		case IWM_UCODE_TLV_TYPE_REGIONS:
		case IWM_UCODE_TLV_TYPE_TRIGGERS:
			break;

		case IWM_UCODE_TLV_HW_TYPE:
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		/* undocumented TLVs found in iwm-9000-43 image */
		case 0x1000003:
		case 0x1000004:
			break;

		default:
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * Check for size_t overflow and ignore missing padding at
		 * end of firmware file.
		 */
		if (roundup(tlv_len, 4) > len)
			break;

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		printf("%s: firmware parse error %d, "
		    "section type %d\n", DEVNAME(sc), err, tlv_type);
	}

 out:
	if (err) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_rawdata != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return err;
}

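/*
 * Periphery (PRPH) registers are reached indirectly through the HBUS
 * target access window. The *_unlocked variants assume the device is
 * already awake, e.g. because the caller holds the NIC lock; the locking
 * variants assert this via iwm_nic_assert_locked().
 */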
uint32_t
iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	iwm_nic_assert_locked(sc);
	return iwm_read_prph_unlocked(sc, addr);
}

void
iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	iwm_nic_assert_locked(sc);
	iwm_write_prph_unlocked(sc, addr, val);
}

void
iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
{
	iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
	iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
}

int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}

int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

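/*
 * Poll a CSR register until (reg & mask) == (bits & mask), or until the
 * timeout "timo" (in microseconds, checked in 10 usec steps) runs out.
 * Returns 1 on success, 0 on timeout.
 */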
int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

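/*
 * Acquire the (counted) NIC lock: request MAC access and poll until the
 * device has woken up. The MAC access request is only dropped again once
 * the final reference is released in iwm_nic_unlock().
 * Returns 1 on success, 0 on failure.
 */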
int
iwm_nic_lock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		iwm_nic_assert_locked(sc);
		sc->sc_nic_locks++;
		return 1; /* already locked */
	}

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		sc->sc_nic_locks++;
		return 1;
	}

	printf("%s: acquiring device failed\n", DEVNAME(sc));
	return 0;
}

void
iwm_nic_assert_locked(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks <= 0)
		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
}

void
iwm_nic_unlock(struct iwm_softc *sc)
{
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0)
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else
		printf("%s: NIC already unlocked\n", DEVNAME(sc));
}

int
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

int
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

int
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

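/*
 * Allocate a zeroed, physically contiguous (single segment) DMA buffer
 * of the given size and alignment, and record its bus and kernel virtual
 * addresses in "dma".
 */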
int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	caddr_t va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}

void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

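/*
 * Allocate the RX ring: the free descriptor array, the status area, and
 * one DMA-mapped receive buffer per slot. Devices with multi-queue RX
 * support use 64-bit free descriptors and an additional "used" descriptor
 * ring; older devices use 32-bit descriptors only.
 */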
1286 int
iwm_alloc_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1287 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1288 {
1289 	bus_size_t size;
1290 	size_t descsz;
1291 	int count, i, err;
1292 
1293 	ring->cur = 0;
1294 
1295 	if (sc->sc_mqrx_supported) {
1296 		count = IWM_RX_MQ_RING_COUNT;
1297 		descsz = sizeof(uint64_t);
1298 	} else {
1299 		count = IWM_RX_RING_COUNT;
1300 		descsz = sizeof(uint32_t);
1301 	}
1302 
1303 	/* Allocate RX descriptors (256-byte aligned). */
1304 	size = count * descsz;
1305 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1306 	if (err) {
1307 		printf("%s: could not allocate RX ring DMA memory\n",
1308 		    DEVNAME(sc));
1309 		goto fail;
1310 	}
1311 	ring->desc = ring->free_desc_dma.vaddr;
1312 
1313 	/* Allocate RX status area (16-byte aligned). */
1314 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1315 	    sizeof(*ring->stat), 16);
1316 	if (err) {
1317 		printf("%s: could not allocate RX status DMA memory\n",
1318 		    DEVNAME(sc));
1319 		goto fail;
1320 	}
1321 	ring->stat = ring->stat_dma.vaddr;
1322 
1323 	if (sc->sc_mqrx_supported) {
1324 		size = count * sizeof(uint32_t);
1325 		err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1326 		    size, 256);
1327 		if (err) {
1328 			printf("%s: could not allocate RX ring DMA memory\n",
1329 			    DEVNAME(sc));
1330 			goto fail;
1331 		}
1332 	}
1333 
1334 	for (i = 0; i < count; i++) {
1335 		struct iwm_rx_data *data = &ring->data[i];
1336 
1337 		memset(data, 0, sizeof(*data));
1338 		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1339 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1340 		    &data->map);
1341 		if (err) {
1342 			printf("%s: could not create RX buf DMA map\n",
1343 			    DEVNAME(sc));
1344 			goto fail;
1345 		}
1346 
1347 		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1348 		if (err)
1349 			goto fail;
1350 	}
1351 	return 0;
1352 
1353 fail:	iwm_free_rx_ring(sc, ring);
1354 	return err;
1355 }
1356 
1357 void
iwm_disable_rx_dma(struct iwm_softc * sc)1358 iwm_disable_rx_dma(struct iwm_softc *sc)
1359 {
1360 	int ntries;
1361 
1362 	if (iwm_nic_lock(sc)) {
1363 		if (sc->sc_mqrx_supported) {
1364 			iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1365 			for (ntries = 0; ntries < 1000; ntries++) {
1366 				if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1367 				    IWM_RXF_DMA_IDLE)
1368 					break;
1369 				DELAY(10);
1370 			}
1371 		} else {
1372 			IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1373 			for (ntries = 0; ntries < 1000; ntries++) {
1374 				if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG)&
1375 				    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1376 					break;
1377 				DELAY(10);
1378 			}
1379 		}
1380 		iwm_nic_unlock(sc);
1381 	}
1382 }
1383 
1384 void
iwm_reset_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1385 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1386 {
1387 	ring->cur = 0;
1388 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1389 	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1390 	memset(ring->stat, 0, sizeof(*ring->stat));
1391 	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1392 	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1393 
1394 }
1395 
1396 void
iwm_free_rx_ring(struct iwm_softc * sc,struct iwm_rx_ring * ring)1397 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1398 {
1399 	int count, i;
1400 
1401 	iwm_dma_contig_free(&ring->free_desc_dma);
1402 	iwm_dma_contig_free(&ring->stat_dma);
1403 	iwm_dma_contig_free(&ring->used_desc_dma);
1404 
1405 	if (sc->sc_mqrx_supported)
1406 		count = IWM_RX_MQ_RING_COUNT;
1407 	else
1408 		count = IWM_RX_RING_COUNT;
1409 
1410 	for (i = 0; i < count; i++) {
1411 		struct iwm_rx_data *data = &ring->data[i];
1412 
1413 		if (data->m != NULL) {
1414 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1415 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1416 			bus_dmamap_unload(sc->sc_dmat, data->map);
1417 			m_freem(data->m);
1418 			data->m = NULL;
1419 		}
1420 		if (data->map != NULL)
1421 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1422 	}
1423 }
1424 
1425 int
iwm_alloc_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring,int qid)1426 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1427 {
1428 	bus_addr_t paddr;
1429 	bus_size_t size;
1430 	int i, err;
1431 
1432 	ring->qid = qid;
1433 	ring->queued = 0;
1434 	ring->cur = 0;
1435 	ring->tail = 0;
1436 
1437 	/* Allocate TX descriptors (256-byte aligned). */
1438 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1439 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1440 	if (err) {
1441 		printf("%s: could not allocate TX ring DMA memory\n",
1442 		    DEVNAME(sc));
1443 		goto fail;
1444 	}
1445 	ring->desc = ring->desc_dma.vaddr;
1446 
1447 	/*
1448 	 * There is no need to allocate DMA buffers for unused rings.
1449 	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
1450 	 * than we currently need.
1451 	 *
1452 	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
1453 	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
1454 	 * are sc->tqx[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1455 	 * in order to provide one queue per EDCA category.
1456 	 * Tx aggregation requires additional queues, one queue per TID for
1457 	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
1458 	 *
1459 	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
1460 	 * and Tx aggregation is not supported.
1461 	 *
1462 	 * Unfortunately, we cannot tell if DQA will be used until the
1463 	 * firmware gets loaded later, so just allocate sufficient rings
1464 	 * in order to satisfy both cases.
1465 	 */
1466 	if (qid > IWM_LAST_AGG_TX_QUEUE)
1467 		return 0;
1468 
1469 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1470 	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1471 	if (err) {
1472 		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1473 		goto fail;
1474 	}
1475 	ring->cmd = ring->cmd_dma.vaddr;
1476 
1477 	paddr = ring->cmd_dma.paddr;
1478 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1479 		struct iwm_tx_data *data = &ring->data[i];
1480 		size_t mapsize;
1481 
1482 		data->cmd_paddr = paddr;
1483 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1484 		    + offsetof(struct iwm_tx_cmd, scratch);
1485 		paddr += sizeof(struct iwm_device_cmd);
1486 
1487 		/* FW commands may require more mapped space than packets. */
1488 		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE)
1489 			mapsize = (sizeof(struct iwm_cmd_header) +
1490 			    IWM_MAX_CMD_PAYLOAD_SIZE);
1491 		else
1492 			mapsize = MCLBYTES;
1493 		err = bus_dmamap_create(sc->sc_dmat, mapsize,
1494 		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1495 		    &data->map);
1496 		if (err) {
1497 			printf("%s: could not create TX buf DMA map\n",
1498 			    DEVNAME(sc));
1499 			goto fail;
1500 		}
1501 	}
1502 	KASSERT(paddr == ring->cmd_dma.paddr + size);
1503 	return 0;
1504 
1505 fail:	iwm_free_tx_ring(sc, ring);
1506 	return err;
1507 }
1508 
1509 void
iwm_reset_tx_ring(struct iwm_softc * sc,struct iwm_tx_ring * ring)1510 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1511 {
1512 	int i;
1513 
1514 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1515 		struct iwm_tx_data *data = &ring->data[i];
1516 
1517 		if (data->m != NULL) {
1518 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1519 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1520 			bus_dmamap_unload(sc->sc_dmat, data->map);
1521 			m_freem(data->m);
1522 			data->m = NULL;
1523 		}
1524 	}
1525 	/* Clear TX descriptors. */
1526 	memset(ring->desc, 0, ring->desc_dma.size);
1527 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1528 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1529 	sc->qfullmsk &= ~(1 << ring->qid);
1530 	sc->qenablemsk &= ~(1 << ring->qid);
1531 	/* 7000 family NICs are locked while commands are in progress. */
1532 	if (ring->qid == sc->cmdqid && ring->queued > 0) {
1533 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1534 			iwm_nic_unlock(sc);
1535 	}
1536 	ring->queued = 0;
1537 	ring->cur = 0;
1538 	ring->tail = 0;
1539 }
1540 
1541 void
1542 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1543 {
1544 	int i;
1545 
1546 	iwm_dma_contig_free(&ring->desc_dma);
1547 	iwm_dma_contig_free(&ring->cmd_dma);
1548 
1549 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1550 		struct iwm_tx_data *data = &ring->data[i];
1551 
1552 		if (data->m != NULL) {
1553 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1554 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1555 			bus_dmamap_unload(sc->sc_dmat, data->map);
1556 			m_freem(data->m);
1557 			data->m = NULL;
1558 		}
1559 		if (data->map != NULL)
1560 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1561 	}
1562 }
1563 
1564 void
1565 iwm_enable_rfkill_int(struct iwm_softc *sc)
1566 {
1567 	if (!sc->sc_msix) {
1568 		sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1569 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1570 	} else {
1571 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1572 		    sc->sc_fh_init_mask);
1573 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1574 		    ~IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL);
1575 		sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1576 	}
1577 
1578 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1579 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1580 		    IWM_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
1581 }
1582 
1583 int
1584 iwm_check_rfkill(struct iwm_softc *sc)
1585 {
1586 	uint32_t v;
1587 	int rv;
1588 
1589 	/*
1590 	 * "documentation" is not really helpful here:
1591 	 *  27:	HW_RF_KILL_SW
1592 	 *	Indicates state of (platform's) hardware RF-Kill switch
1593 	 *
1594 	 * But apparently when it's off, it's on ...
1595 	 */
1596 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1597 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1598 	if (rv) {
1599 		sc->sc_flags |= IWM_FLAG_RFKILL;
1600 	} else {
1601 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
1602 	}
1603 
1604 	return rv;
1605 }
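
/*
 * Note on the polarity checked above: the bit reads as "radio may be
 * used", so a cleared IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW means the
 * hardware kill switch is engaged. iwm_check_rfkill() thus returns
 * nonzero, and sets IWM_FLAG_RFKILL, exactly when the radio is disabled.
 */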
1606 
1607 void
1608 iwm_enable_interrupts(struct iwm_softc *sc)
1609 {
1610 	if (!sc->sc_msix) {
1611 		sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1612 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1613 	} else {
1614 		/*
1615 		 * fh/hw_mask keeps all the unmasked causes.
1616 		 * Unlike MSI, an MSI-X cause is enabled while its mask bit is unset.
1617 		 */
1618 		sc->sc_hw_mask = sc->sc_hw_init_mask;
1619 		sc->sc_fh_mask = sc->sc_fh_init_mask;
1620 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1621 		    ~sc->sc_fh_mask);
1622 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1623 		    ~sc->sc_hw_mask);
1624 	}
1625 }
1626 
1627 void
1628 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1629 {
1630 	if (!sc->sc_msix) {
1631 		sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1632 		IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1633 	} else {
1634 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1635 		    sc->sc_hw_init_mask);
1636 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1637 		    ~IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
1638 		sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1639 	}
1640 }
1641 
1642 void
1643 iwm_restore_interrupts(struct iwm_softc *sc)
1644 {
1645 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1646 }
1647 
1648 void
1649 iwm_disable_interrupts(struct iwm_softc *sc)
1650 {
1651 	if (!sc->sc_msix) {
1652 		IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1653 
1654 		/* acknowledge all interrupts */
1655 		IWM_WRITE(sc, IWM_CSR_INT, ~0);
1656 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1657 	} else {
1658 		IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1659 		    sc->sc_fh_init_mask);
1660 		IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1661 		    sc->sc_hw_init_mask);
1662 	}
1663 }
1664 
1665 void
1666 iwm_ict_reset(struct iwm_softc *sc)
1667 {
1668 	iwm_disable_interrupts(sc);
1669 
1670 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1671 	sc->ict_cur = 0;
1672 
1673 	/* Set physical address of ICT (4KB aligned). */
1674 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1675 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1676 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1677 	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1678 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1679 
1680 	/* Switch to ICT interrupt mode in driver. */
1681 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1682 
1683 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1684 	iwm_enable_interrupts(sc);
1685 }
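
/*
 * Sketch of the alignment requirement behind the write above; not part
 * of the driver. Only the address bits above IWM_ICT_PADDR_SHIFT fit in
 * IWM_CSR_DRAM_INT_TBL_REG alongside the control flags, so the ICT
 * table's low address bits must be zero. A shift of 12 (4KB alignment,
 * per the comment in iwm_ict_reset()) is assumed here.
 */
#if 0
	KASSERT((sc->ict_dma.paddr & ((1 << IWM_ICT_PADDR_SHIFT) - 1)) == 0);
#endif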
1686 
1687 #define IWM_HW_READY_TIMEOUT 50
1688 int
1689 iwm_set_hw_ready(struct iwm_softc *sc)
1690 {
1691 	int ready;
1692 
1693 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1694 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1695 
1696 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1697 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1698 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1699 	    IWM_HW_READY_TIMEOUT);
1700 	if (ready)
1701 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1702 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1703 
1704 	return ready;
1705 }
1706 #undef IWM_HW_READY_TIMEOUT
1707 
1708 int
1709 iwm_prepare_card_hw(struct iwm_softc *sc)
1710 {
1711 	int t = 0;
1712 	int ntries;
1713 
1714 	if (iwm_set_hw_ready(sc))
1715 		return 0;
1716 
1717 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1718 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1719 	DELAY(1000);
1720 
1721 	for (ntries = 0; ntries < 10; ntries++) {
1722 		/* If HW is not ready, prepare the conditions to check again */
1723 		IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1724 		    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1725 
1726 		do {
1727 			if (iwm_set_hw_ready(sc))
1728 				return 0;
1729 			DELAY(200);
1730 			t += 200;
1731 		} while (t < 150000);
1732 		DELAY(25000);
1733 	}
1734 
1735 	return ETIMEDOUT;
1736 }
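
/*
 * Worst-case timing of iwm_prepare_card_hw(): since 't' is never reset,
 * the inner do/while polls for the full 150ms (in 200us steps) only on
 * the first outer attempt; each of the remaining nine attempts checks
 * hardware readiness once and then waits 25ms, so the function gives up
 * after roughly 150ms + 10 * 25ms of delays.
 */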
1737 
1738 void
1739 iwm_apm_config(struct iwm_softc *sc)
1740 {
1741 	pcireg_t lctl, cap;
1742 
1743 	/*
1744 	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
1745 	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1746 	 * If so (likely), disable L0S, so device moves directly L0->L1;
1747 	 *    costs negligible amount of power savings.
1748 	 * If not (unlikely), enable L0S, so there is at least some
1749 	 *    power savings, even without L1.
1750 	 */
1751 	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1752 	    sc->sc_cap_off + PCI_PCIE_LCSR);
1753 	if (lctl & PCI_PCIE_LCSR_ASPM_L1) {
1754 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1755 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1756 	} else {
1757 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1758 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1759 	}
1760 
1761 	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1762 	    sc->sc_cap_off + PCI_PCIE_DCSR2);
1763 	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1764 	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
1765 	    DEVNAME(sc),
1766 	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
1767 	    sc->sc_ltr_enabled ? "En" : "Dis"));
1768 }
1769 
1770 /*
1771  * Start up NIC's basic functionality after it has been reset
1772  * e.g. after platform boot or shutdown.
1773  * NOTE:  This does not load uCode nor start the embedded processor
1774  */
1775 int
1776 iwm_apm_init(struct iwm_softc *sc)
1777 {
1778 	int err = 0;
1779 
1780 	/* Disable L0S exit timer (platform NMI workaround) */
1781 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1782 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1783 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1784 
1785 	/*
1786 	 * Disable L0s without affecting L1;
1787 	 *  don't wait for ICH L0s (ICH bug W/A)
1788 	 */
1789 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1790 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1791 
1792 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
1793 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1794 
1795 	/*
1796 	 * Enable HAP INTA (interrupt from management bus) to
1797 	 * wake device's PCI Express link L1a -> L0s
1798 	 */
1799 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1800 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1801 
1802 	iwm_apm_config(sc);
1803 
1804 #if 0 /* not for 7k/8k */
1805 	/* Configure analog phase-lock-loop before activating to D0A */
1806 	if (trans->cfg->base_params->pll_cfg_val)
1807 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1808 		    trans->cfg->base_params->pll_cfg_val);
1809 #endif
1810 
1811 	/*
1812 	 * Set "initialization complete" bit to move adapter from
1813 	 * D0U* --> D0A* (powered-up active) state.
1814 	 */
1815 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1816 
1817 	/*
1818 	 * Wait for clock stabilization; once stabilized, access to
1819 	 * device-internal resources is supported, e.g. iwm_write_prph()
1820 	 * and accesses to uCode SRAM.
1821 	 */
1822 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1823 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1824 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1825 		printf("%s: timeout waiting for clock stabilization\n",
1826 		    DEVNAME(sc));
1827 		err = ETIMEDOUT;
1828 		goto out;
1829 	}
1830 
1831 	if (sc->host_interrupt_operation_mode) {
1832 		/*
1833 		 * This is a bit of an abuse: this workaround is needed for
1834 		 * 7260 / 3160 only, hence the host_interrupt_operation_mode
1835 		 * check, even though the workaround is unrelated to that flag.
1836 		 *
1837 		 * Enable the oscillator to count wake up time for L1 exit. This
1838 		 * consumes slightly more power (100uA) - but allows to be sure
1839 		 * that we wake up from L1 on time.
1840 		 *
1841 		 * This looks weird: read twice the same register, discard the
1842 		 * value, set a bit, and yet again, read that same register
1843 		 * just to discard the value. But that's the way the hardware
1844 		 * seems to like it.
1845 		 */
1846 		if (iwm_nic_lock(sc)) {
1847 			iwm_read_prph(sc, IWM_OSC_CLK);
1848 			iwm_read_prph(sc, IWM_OSC_CLK);
1849 			iwm_nic_unlock(sc);
1850 		}
1851 		err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1852 		    IWM_OSC_CLK_FORCE_CONTROL);
1853 		if (err)
1854 			goto out;
1855 		if (iwm_nic_lock(sc)) {
1856 			iwm_read_prph(sc, IWM_OSC_CLK);
1857 			iwm_read_prph(sc, IWM_OSC_CLK);
1858 			iwm_nic_unlock(sc);
1859 		}
1860 	}
1861 
1862 	/*
1863 	 * Enable DMA clock and wait for it to stabilize.
1864 	 *
1865 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1866 	 * do not disable clocks.  This preserves any hardware bits already
1867 	 * set by default in "CLK_CTRL_REG" after reset.
1868 	 */
1869 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1870 		if (iwm_nic_lock(sc)) {
1871 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1872 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1873 			iwm_nic_unlock(sc);
1874 		}
1875 		DELAY(20);
1876 
1877 		/* Disable L1-Active */
1878 		err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1879 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1880 		if (err)
1881 			goto out;
1882 
1883 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
1884 		if (iwm_nic_lock(sc)) {
1885 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1886 			    IWM_APMG_RTC_INT_STT_RFKILL);
1887 			iwm_nic_unlock(sc);
1888 		}
1889 	}
1890  out:
1891 	if (err)
1892 		printf("%s: apm init error %d\n", DEVNAME(sc), err);
1893 	return err;
1894 }
1895 
1896 void
1897 iwm_apm_stop(struct iwm_softc *sc)
1898 {
1899 	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1900 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1901 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1902 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
1903 	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
1904 	DELAY(1000);
1905 	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1906 	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
1907 	DELAY(5000);
1908 
1909 	/* stop device's busmaster DMA activity */
1910 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1911 
1912 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1913 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1914 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1915 		printf("%s: timeout waiting for master\n", DEVNAME(sc));
1916 
1917 	/*
1918 	 * Clear "initialization complete" bit to move adapter from
1919 	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1920 	 */
1921 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1922 	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1923 }
1924 
1925 void
1926 iwm_init_msix_hw(struct iwm_softc *sc)
1927 {
1928 	iwm_conf_msix_hw(sc, 0);
1929 
1930 	if (!sc->sc_msix)
1931 		return;
1932 
1933 	sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1934 	sc->sc_fh_mask = sc->sc_fh_init_mask;
1935 	sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1936 	sc->sc_hw_mask = sc->sc_hw_init_mask;
1937 }
1938 
1939 void
1940 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1941 {
1942 	int vector = 0;
1943 
1944 	if (!sc->sc_msix) {
1945 		/* Newer chips default to MSIX. */
1946 		if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1947 			iwm_write_prph(sc, IWM_UREG_CHICK,
1948 			    IWM_UREG_CHICK_MSI_ENABLE);
1949 			iwm_nic_unlock(sc);
1950 		}
1951 		return;
1952 	}
1953 
1954 	if (!stopped && iwm_nic_lock(sc)) {
1955 		iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1956 		iwm_nic_unlock(sc);
1957 	}
1958 
1959 	/* Disable all interrupts */
1960 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1961 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1962 
1963 	/* Map fallback-queue (command/mgmt) to a single vector */
1964 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1965 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1966 	/* Map RSS queue (data) to the same vector */
1967 	IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1968 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1969 
1970 	/* Enable the interrupt causes for the RX queues. */
1971 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1972 	    IWM_MSIX_FH_INT_CAUSES_Q0 | IWM_MSIX_FH_INT_CAUSES_Q1);
1973 
1974 	/* Map non-RX causes to the same vector */
1975 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1976 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1977 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1978 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1979 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1980 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1981 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1982 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1983 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1984 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1985 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1986 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1987 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1988 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1989 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1990 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1991 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1992 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1993 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1994 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1995 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1996 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1997 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1998 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
1999 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
2000 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
2001 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
2002 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
2003 	IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
2004 	    vector | IWM_MSIX_NON_AUTO_CLEAR_CAUSE);
2005 
2006 	/* Enable the non-RX interrupt causes. */
2007 	IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
2008 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2009 	    IWM_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2010 	    IWM_MSIX_FH_INT_CAUSES_S2D |
2011 	    IWM_MSIX_FH_INT_CAUSES_FH_ERR);
2012 	IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
2013 	    IWM_MSIX_HW_INT_CAUSES_REG_ALIVE |
2014 	    IWM_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2015 	    IWM_MSIX_HW_INT_CAUSES_REG_IML |
2016 	    IWM_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2017 	    IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2018 	    IWM_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2019 	    IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2020 	    IWM_MSIX_HW_INT_CAUSES_REG_SCD |
2021 	    IWM_MSIX_HW_INT_CAUSES_REG_FH_TX |
2022 	    IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2023 	    IWM_MSIX_HW_INT_CAUSES_REG_HAP);
2024 }
2025 
2026 int
2027 iwm_clear_persistence_bit(struct iwm_softc *sc)
2028 {
2029 	uint32_t hpm, wprot;
2030 
2031 	hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2032 	if (hpm != 0xa5a5a5a0 && (hpm & IWM_HPM_PERSISTENCE_BIT)) {
2033 		wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2034 		if (wprot & IWM_PREG_WFPM_ACCESS) {
2035 			printf("%s: cannot clear persistence bit\n",
2036 			    DEVNAME(sc));
2037 			return EPERM;
2038 		}
2039 		iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2040 		    hpm & ~IWM_HPM_PERSISTENCE_BIT);
2041 	}
2042 
2043 	return 0;
2044 }
2045 
2046 int
2047 iwm_start_hw(struct iwm_softc *sc)
2048 {
2049 	int err;
2050 
2051 	err = iwm_prepare_card_hw(sc);
2052 	if (err)
2053 		return err;
2054 
2055 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2056 		err = iwm_clear_persistence_bit(sc);
2057 		if (err)
2058 			return err;
2059 	}
2060 
2061 	/* Reset the entire device */
2062 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2063 	DELAY(5000);
2064 
2065 	err = iwm_apm_init(sc);
2066 	if (err)
2067 		return err;
2068 
2069 	iwm_init_msix_hw(sc);
2070 
2071 	iwm_enable_rfkill_int(sc);
2072 	iwm_check_rfkill(sc);
2073 
2074 	return 0;
2075 }
2076 
2077 
2078 void
2079 iwm_stop_device(struct iwm_softc *sc)
2080 {
2081 	int chnl, ntries;
2082 	int qid;
2083 
2084 	iwm_disable_interrupts(sc);
2085 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2086 
2087 	/* Stop all DMA channels. */
2088 	if (iwm_nic_lock(sc)) {
2089 		/* Deactivate TX scheduler. */
2090 		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2091 
2092 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2093 			IWM_WRITE(sc,
2094 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
2095 			for (ntries = 0; ntries < 200; ntries++) {
2096 				uint32_t r;
2097 
2098 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2099 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
2100 				    chnl))
2101 					break;
2102 				DELAY(20);
2103 			}
2104 		}
2105 		iwm_nic_unlock(sc);
2106 	}
2107 	iwm_disable_rx_dma(sc);
2108 
2109 	iwm_reset_rx_ring(sc, &sc->rxq);
2110 
2111 	for (qid = 0; qid < nitems(sc->txq); qid++)
2112 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
2113 
2114 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2115 		if (iwm_nic_lock(sc)) {
2116 			/* Power-down device's busmaster DMA clocks */
2117 			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2118 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
2119 			iwm_nic_unlock(sc);
2120 		}
2121 		DELAY(5);
2122 	}
2123 
2124 	/* Make sure (redundant) we've released our request to stay awake */
2125 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2126 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2127 	if (sc->sc_nic_locks > 0)
2128 		printf("%s: %d active NIC locks forcefully cleared\n",
2129 		    DEVNAME(sc), sc->sc_nic_locks);
2130 	sc->sc_nic_locks = 0;
2131 
2132 	/* Stop the device, and put it in low power state */
2133 	iwm_apm_stop(sc);
2134 
2135 	/* Reset the on-board processor. */
2136 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2137 	DELAY(5000);
2138 
2139 	/*
2140 	 * Upon stop, the IVAR table gets erased, so msi-x won't
2141 	 * work. This causes a bug in RF-KILL flows, since the interrupt
2142 	 * that enables radio won't fire on the correct irq, and the
2143 	 * driver won't be able to handle the interrupt.
2144 	 * Configure the IVAR table again after reset.
2145 	 */
2146 	iwm_conf_msix_hw(sc, 1);
2147 
2148 	/*
2149 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2150 	 * Clear the interrupt again.
2151 	 */
2152 	iwm_disable_interrupts(sc);
2153 
2154 	/* Even though we stop the HW we still want the RF kill interrupt. */
2155 	iwm_enable_rfkill_int(sc);
2156 	iwm_check_rfkill(sc);
2157 
2158 	iwm_prepare_card_hw(sc);
2159 }
2160 
2161 void
2162 iwm_nic_config(struct iwm_softc *sc)
2163 {
2164 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2165 	uint32_t mask, val, reg_val = 0;
2166 
2167 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2168 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
2169 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2170 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
2171 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2172 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
2173 
2174 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2175 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2176 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2177 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2178 
2179 	/* radio configuration */
2180 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2181 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2182 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2183 
2184 	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2185 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2186 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2187 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2188 	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2189 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2190 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2191 
2192 	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2193 	val &= ~mask;
2194 	val |= reg_val;
2195 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2196 
2197 	/*
2198 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
2199 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
2200 	 * to lose ownership and not be able to obtain it back.
2201 	 */
2202 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2203 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2204 		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
2205 		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
2206 }
2207 
2208 int
2209 iwm_nic_rx_init(struct iwm_softc *sc)
2210 {
2211 	if (sc->sc_mqrx_supported)
2212 		return iwm_nic_rx_mq_init(sc);
2213 	else
2214 		return iwm_nic_rx_legacy_init(sc);
2215 }
2216 
2217 int
2218 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2219 {
2220 	int enabled;
2221 
2222 	if (!iwm_nic_lock(sc))
2223 		return EBUSY;
2224 
2225 	/* Stop RX DMA. */
2226 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2227 	/* Disable RX used and free queue operation. */
2228 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2229 
2230 	iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2231 	    sc->rxq.free_desc_dma.paddr);
2232 	iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2233 	    sc->rxq.used_desc_dma.paddr);
2234 	iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2235 	    sc->rxq.stat_dma.paddr);
2236 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2237 	iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2238 	iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2239 
2240 	/* We configure only queue 0 for now. */
2241 	enabled = ((1 << 0) << 16) | (1 << 0);
2242 
2243 	/* Enable RX DMA, 4KB buffer size. */
2244 	iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2245 	    IWM_RFH_DMA_EN_ENABLE_VAL |
2246 	    IWM_RFH_RXF_DMA_RB_SIZE_4K |
2247 	    IWM_RFH_RXF_DMA_MIN_RB_4_8 |
2248 	    IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
2249 	    IWM_RFH_RXF_DMA_RBDCB_SIZE_512);
2250 
2251 	/* Enable RX DMA snooping. */
2252 	iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2253 	    IWM_RFH_GEN_CFG_RFH_DMA_SNOOP |
2254 	    IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP |
2255 	    (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2256 	    IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128));
2257 
2258 	/* Enable the configured queue(s). */
2259 	iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2260 
2261 	iwm_nic_unlock(sc);
2262 
2263 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2264 
2265 	IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2266 
2267 	return 0;
2268 }
2269 
2270 int
2271 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2272 {
2273 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2274 
2275 	iwm_disable_rx_dma(sc);
2276 
2277 	if (!iwm_nic_lock(sc))
2278 		return EBUSY;
2279 
2280 	/* reset and flush pointers */
2281 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2282 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2283 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2284 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2285 
2286 	/* Set physical address of RX ring (256-byte aligned). */
2287 	IWM_WRITE(sc,
2288 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2289 
2290 	/* Set physical address of RX status (16-byte aligned). */
2291 	IWM_WRITE(sc,
2292 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2293 
2294 	/* Enable RX. */
2295 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2296 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
2297 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
2298 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
2299 	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
2300 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
2301 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
2302 
2303 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2304 
2305 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
2306 	if (sc->host_interrupt_operation_mode)
2307 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2308 
2309 	iwm_nic_unlock(sc);
2310 
2311 	/*
2312 	 * This value should initially be 0 (before preparing any RBs),
2313 	 * and should be 8 after preparing the first 8 RBs (for example).
2314 	 */
2315 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2316 
2317 	return 0;
2318 }
2319 
2320 int
2321 iwm_nic_tx_init(struct iwm_softc *sc)
2322 {
2323 	int qid, err;
2324 
2325 	if (!iwm_nic_lock(sc))
2326 		return EBUSY;
2327 
2328 	/* Deactivate TX scheduler. */
2329 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2330 
2331 	/* Set physical address of "keep warm" page (16-byte aligned). */
2332 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2333 
2334 	for (qid = 0; qid < nitems(sc->txq); qid++) {
2335 		struct iwm_tx_ring *txq = &sc->txq[qid];
2336 
2337 		/* Set physical address of TX ring (256-byte aligned). */
2338 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2339 		    txq->desc_dma.paddr >> 8);
2340 	}
2341 
2342 	err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2343 	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
2344 	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);
2345 
2346 	iwm_nic_unlock(sc);
2347 
2348 	return err;
2349 }
2350 
2351 int
2352 iwm_nic_init(struct iwm_softc *sc)
2353 {
2354 	int err;
2355 
2356 	iwm_apm_init(sc);
2357 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2358 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2359 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2360 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2361 
2362 	iwm_nic_config(sc);
2363 
2364 	err = iwm_nic_rx_init(sc);
2365 	if (err)
2366 		return err;
2367 
2368 	err = iwm_nic_tx_init(sc);
2369 	if (err)
2370 		return err;
2371 
2372 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2373 
2374 	return 0;
2375 }
2376 
2377 /* Map a TID to an ieee80211_edca_ac category. */
2378 const uint8_t iwm_tid_to_ac[IWM_MAX_TID_COUNT] = {
2379 	EDCA_AC_BE,
2380 	EDCA_AC_BK,
2381 	EDCA_AC_BK,
2382 	EDCA_AC_BE,
2383 	EDCA_AC_VI,
2384 	EDCA_AC_VI,
2385 	EDCA_AC_VO,
2386 	EDCA_AC_VO,
2387 };
2388 
2389 /* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2390 const uint8_t iwm_ac_to_tx_fifo[] = {
2391 	IWM_TX_FIFO_BE,
2392 	IWM_TX_FIFO_BK,
2393 	IWM_TX_FIFO_VI,
2394 	IWM_TX_FIFO_VO,
2395 };
2396 
2397 int
2398 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2399 {
2400 	int err;
2401 	iwm_nic_assert_locked(sc);
2402 
2403 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2404 
2405 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2406 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2407 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2408 
2409 	err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2410 	if (err) {
2411 		return err;
2412 	}
2413 
2414 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2415 
2416 	iwm_write_mem32(sc,
2417 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2418 
2419 	/* Set scheduler window size and frame limit. */
2420 	iwm_write_mem32(sc,
2421 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2422 	    sizeof(uint32_t),
2423 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2424 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2425 	    ((IWM_FRAME_LIMIT
2426 		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2427 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2428 
2429 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2430 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2431 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2432 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2433 	    IWM_SCD_QUEUE_STTS_REG_MSK);
2434 
2435 	if (qid == sc->cmdqid)
2436 		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2437 		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2438 
2439 	return 0;
2440 }
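
/*
 * Design note: iwm_enable_ac_txq() programs the scheduler directly via
 * peripheral register and SRAM writes, which works before the firmware
 * is fully up (iwm_post_alive() uses it for the command queue), whereas
 * iwm_enable_txq() below asks the firmware to do the equivalent setup
 * through an IWM_SCD_QUEUE_CFG host command.
 */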
2441 
2442 int
2443 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2444     int aggregate, uint8_t tid, uint16_t ssn)
2445 {
2446 	struct iwm_tx_ring *ring = &sc->txq[qid];
2447 	struct iwm_scd_txq_cfg_cmd cmd;
2448 	int err, idx, scd_bug;
2449 
2450 	iwm_nic_assert_locked(sc);
2451 
2452 	/*
2453 	 * If we need to move the SCD write pointer by steps of
2454 	 * 0x40, 0x80 or 0xc0, it gets stuck.
2455 	 * This is really ugly, but this is the easiest way out for
2456 	 * this sad hardware issue.
2457 	 * This bug has been fixed on devices 9000 and up.
2458 	 */
2459 	scd_bug = !sc->sc_mqrx_supported &&
2460 		!((ssn - ring->cur) & 0x3f) &&
2461 		(ssn != ring->cur);
2462 	if (scd_bug)
2463 		ssn = (ssn + 1) & 0xfff;
2464 
2465 	idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2466 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2467 	ring->cur = idx;
2468 	ring->tail = idx;
2469 
2470 	memset(&cmd, 0, sizeof(cmd));
2471 	cmd.tid = tid;
2472 	cmd.scd_queue = qid;
2473 	cmd.enable = 1;
2474 	cmd.sta_id = sta_id;
2475 	cmd.tx_fifo = fifo;
2476 	cmd.aggregate = aggregate;
2477 	cmd.ssn = htole16(ssn);
2478 	cmd.window = IWM_FRAME_LIMIT;
2479 
2480 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2481 	    sizeof(cmd), &cmd);
2482 	if (err)
2483 		return err;
2484 
2485 	sc->qenablemsk |= (1 << qid);
2486 	return 0;
2487 }
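
/*
 * Worked example of the SCD write-pointer workaround in iwm_enable_txq():
 * with ring->cur == 0x00 and ssn == 0x40, (ssn - ring->cur) & 0x3f is 0
 * while ssn != ring->cur, i.e. the pointer would move by a multiple of
 * 0x40 and the scheduler would get stuck, so ssn is nudged to 0x41.
 * With ssn == 0x41 the distance is 1 and no workaround is needed.
 */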
2488 
2489 int
2490 iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2491 {
2492 	struct iwm_scd_txq_cfg_cmd cmd;
2493 	int err;
2494 
2495 	memset(&cmd, 0, sizeof(cmd));
2496 	cmd.tid = tid;
2497 	cmd.scd_queue = qid;
2498 	cmd.enable = 0;
2499 	cmd.sta_id = sta_id;
2500 
2501 	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2502 	if (err)
2503 		return err;
2504 
2505 	sc->qenablemsk &= ~(1 << qid);
2506 	return 0;
2507 }
2508 
2509 int
2510 iwm_post_alive(struct iwm_softc *sc)
2511 {
2512 	int nwords;
2513 	int err, chnl;
2514 	uint32_t base;
2515 
2516 	if (!iwm_nic_lock(sc))
2517 		return EBUSY;
2518 
2519 	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2520 
2521 	iwm_ict_reset(sc);
2522 
2523 	iwm_nic_unlock(sc);
2524 
2525 	/* Clear TX scheduler state in SRAM. */
2526 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2527 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
2528 	    / sizeof(uint32_t);
2529 	err = iwm_write_mem(sc,
2530 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2531 	    NULL, nwords);
2532 	if (err)
2533 		return err;
2534 
2535 	if (!iwm_nic_lock(sc))
2536 		return EBUSY;
2537 
2538 	/* Set physical address of TX scheduler rings (1KB aligned). */
2539 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2540 
2541 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2542 
2543 	/* enable command channel */
2544 	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2545 	if (err) {
2546 		iwm_nic_unlock(sc);
2547 		return err;
2548 	}
2549 
2550 	/* Activate TX scheduler. */
2551 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2552 
2553 	/* Enable DMA channels. */
2554 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2555 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2556 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2557 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2558 	}
2559 
2560 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2561 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2562 
2563 	iwm_nic_unlock(sc);
2564 
2565 	/* Enable L1-Active */
2566 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2567 		err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2568 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2569 	}
2570 
2571 	return err;
2572 }
2573 
2574 struct iwm_phy_db_entry *
2575 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2576 {
2577 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2578 
2579 	if (type >= IWM_PHY_DB_MAX)
2580 		return NULL;
2581 
2582 	switch (type) {
2583 	case IWM_PHY_DB_CFG:
2584 		return &phy_db->cfg;
2585 	case IWM_PHY_DB_CALIB_NCH:
2586 		return &phy_db->calib_nch;
2587 	case IWM_PHY_DB_CALIB_CHG_PAPD:
2588 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2589 			return NULL;
2590 		return &phy_db->calib_ch_group_papd[chg_id];
2591 	case IWM_PHY_DB_CALIB_CHG_TXP:
2592 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2593 			return NULL;
2594 		return &phy_db->calib_ch_group_txp[chg_id];
2595 	default:
2596 		return NULL;
2597 	}
2598 	return NULL;
2599 }
2600 
2601 int
2602 iwm_phy_db_set_section(struct iwm_softc *sc,
2603     struct iwm_calib_res_notif_phy_db *phy_db_notif)
2604 {
2605 	uint16_t type = le16toh(phy_db_notif->type);
2606 	uint16_t size  = le16toh(phy_db_notif->length);
2607 	struct iwm_phy_db_entry *entry;
2608 	uint16_t chg_id = 0;
2609 
2610 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2611 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
2612 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2613 
2614 	entry = iwm_phy_db_get_section(sc, type, chg_id);
2615 	if (!entry)
2616 		return EINVAL;
2617 
2618 	if (entry->data)
2619 		free(entry->data, M_DEVBUF, entry->size);
2620 	entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
2621 	if (!entry->data) {
2622 		entry->size = 0;
2623 		return ENOMEM;
2624 	}
2625 	memcpy(entry->data, phy_db_notif->data, size);
2626 	entry->size = size;
2627 
2628 	return 0;
2629 }
2630 
2631 int
2632 iwm_is_valid_channel(uint16_t ch_id)
2633 {
2634 	if (ch_id <= 14 ||
2635 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2636 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2637 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2638 		return 1;
2639 	return 0;
2640 }
2641 
2642 uint8_t
2643 iwm_ch_id_to_ch_index(uint16_t ch_id)
2644 {
2645 	if (!iwm_is_valid_channel(ch_id))
2646 		return 0xff;
2647 
2648 	if (ch_id <= 14)
2649 		return ch_id - 1;
2650 	if (ch_id <= 64)
2651 		return (ch_id + 20) / 4;
2652 	if (ch_id <= 140)
2653 		return (ch_id - 12) / 4;
2654 	return (ch_id - 13) / 4;
2655 }
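
/*
 * Worked examples for iwm_ch_id_to_ch_index(): channels 1-14 map to
 * indices 0-13; channel 36 -> (36 + 20) / 4 = 14 through channel 64 ->
 * 21; channel 100 -> (100 - 12) / 4 = 22 through channel 140 -> 32;
 * channel 145 -> (145 - 13) / 4 = 33 through channel 165 -> 38.
 */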
2656 
2657 
2658 uint16_t
2659 iwm_channel_id_to_papd(uint16_t ch_id)
2660 {
2661 	if (!iwm_is_valid_channel(ch_id))
2662 		return 0xff;
2663 
2664 	if (1 <= ch_id && ch_id <= 14)
2665 		return 0;
2666 	if (36 <= ch_id && ch_id <= 64)
2667 		return 1;
2668 	if (100 <= ch_id && ch_id <= 140)
2669 		return 2;
2670 	return 3;
2671 }
2672 
2673 uint16_t
2674 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2675 {
2676 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2677 	struct iwm_phy_db_chg_txp *txp_chg;
2678 	int i;
2679 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2680 
2681 	if (ch_index == 0xff)
2682 		return 0xff;
2683 
2684 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2685 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2686 		if (!txp_chg)
2687 			return 0xff;
2688 		/*
2689 		 * Look for the first channel group whose maximum channel
2690 		 * is at or above the requested channel index.
2691 		 */
2692 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2693 			return i;
2694 	}
2695 	return 0xff;
2696 }
2697 
2698 int
2699 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2700     uint16_t *size, uint16_t ch_id)
2701 {
2702 	struct iwm_phy_db_entry *entry;
2703 	uint16_t ch_group_id = 0;
2704 
2705 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2706 		ch_group_id = iwm_channel_id_to_papd(ch_id);
2707 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2708 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2709 
2710 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2711 	if (!entry)
2712 		return EINVAL;
2713 
2714 	*data = entry->data;
2715 	*size = entry->size;
2716 
2717 	return 0;
2718 }
2719 
2720 int
2721 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2722     void *data)
2723 {
2724 	struct iwm_phy_db_cmd phy_db_cmd;
2725 	struct iwm_host_cmd cmd = {
2726 		.id = IWM_PHY_DB_CMD,
2727 		.flags = IWM_CMD_ASYNC,
2728 	};
2729 
2730 	phy_db_cmd.type = le16toh(type);
2731 	phy_db_cmd.length = le16toh(length);
2732 
2733 	cmd.data[0] = &phy_db_cmd;
2734 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2735 	cmd.data[1] = data;
2736 	cmd.len[1] = length;
2737 
2738 	return iwm_send_cmd(sc, &cmd);
2739 }
2740 
2741 int
2742 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2743     uint8_t max_ch_groups)
2744 {
2745 	uint16_t i;
2746 	int err;
2747 	struct iwm_phy_db_entry *entry;
2748 
2749 	for (i = 0; i < max_ch_groups; i++) {
2750 		entry = iwm_phy_db_get_section(sc, type, i);
2751 		if (!entry)
2752 			return EINVAL;
2753 
2754 		if (!entry->size)
2755 			continue;
2756 
2757 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2758 		if (err)
2759 			return err;
2760 
2761 		DELAY(1000);
2762 	}
2763 
2764 	return 0;
2765 }
2766 
2767 int
2768 iwm_send_phy_db_data(struct iwm_softc *sc)
2769 {
2770 	uint8_t *data = NULL;
2771 	uint16_t size = 0;
2772 	int err;
2773 
2774 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2775 	if (err)
2776 		return err;
2777 
2778 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2779 	if (err)
2780 		return err;
2781 
2782 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2783 	    &data, &size, 0);
2784 	if (err)
2785 		return err;
2786 
2787 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2788 	if (err)
2789 		return err;
2790 
2791 	err = iwm_phy_db_send_all_channel_groups(sc,
2792 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2793 	if (err)
2794 		return err;
2795 
2796 	err = iwm_phy_db_send_all_channel_groups(sc,
2797 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2798 	if (err)
2799 		return err;
2800 
2801 	return 0;
2802 }
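
/*
 * Note the send order used above: the configuration section and the
 * non-channel calibration results go first, followed by the per-group
 * PAPD and TX power calibration sections. Each group is sent as its own
 * IWM_PHY_DB_CMD, with a 1ms pause between groups (see
 * iwm_phy_db_send_all_channel_groups()).
 */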
2803 
2804 /*
2805  * For the high priority TE use a time event type that has similar priority to
2806  * the FW's action scan priority.
2807  */
2808 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2809 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2810 
2811 int
2812 iwm_send_time_event_cmd(struct iwm_softc *sc,
2813     const struct iwm_time_event_cmd *cmd)
2814 {
2815 	struct iwm_rx_packet *pkt;
2816 	struct iwm_time_event_resp *resp;
2817 	struct iwm_host_cmd hcmd = {
2818 		.id = IWM_TIME_EVENT_CMD,
2819 		.flags = IWM_CMD_WANT_RESP,
2820 		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2821 	};
2822 	uint32_t resp_len;
2823 	int err;
2824 
2825 	hcmd.data[0] = cmd;
2826 	hcmd.len[0] = sizeof(*cmd);
2827 	err = iwm_send_cmd(sc, &hcmd);
2828 	if (err)
2829 		return err;
2830 
2831 	pkt = hcmd.resp_pkt;
2832 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2833 		err = EIO;
2834 		goto out;
2835 	}
2836 
2837 	resp_len = iwm_rx_packet_payload_len(pkt);
2838 	if (resp_len != sizeof(*resp)) {
2839 		err = EIO;
2840 		goto out;
2841 	}
2842 
2843 	resp = (void *)pkt->data;
2844 	if (le32toh(resp->status) == 0)
2845 		sc->sc_time_event_uid = le32toh(resp->unique_id);
2846 	else
2847 		err = EIO;
2848 out:
2849 	iwm_free_resp(sc, &hcmd);
2850 	return err;
2851 }
2852 
2853 void
2854 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2855     uint32_t duration, uint32_t max_delay)
2856 {
2857 	struct iwm_time_event_cmd time_cmd;
2858 
2859 	/* Do nothing if a time event is already scheduled. */
2860 	if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2861 		return;
2862 
2863 	memset(&time_cmd, 0, sizeof(time_cmd));
2864 
2865 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2866 	time_cmd.id_and_color =
2867 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2868 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2869 
2870 	time_cmd.apply_time = htole32(0);
2871 
2872 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2873 	time_cmd.max_delay = htole32(max_delay);
2874 	/* TODO: why do we need to set interval = bi if it is not periodic? */
2875 	time_cmd.interval = htole32(1);
2876 	time_cmd.duration = htole32(duration);
2877 	time_cmd.repeat = 1;
2878 	time_cmd.policy
2879 	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2880 	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
2881 		IWM_T2_V2_START_IMMEDIATELY);
2882 
2883 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2884 		sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2885 
2886 	DELAY(100);
2887 }
2888 
2889 void
2890 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2891 {
2892 	struct iwm_time_event_cmd time_cmd;
2893 
2894 	/* Do nothing if the time event has already ended. */
2895 	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2896 		return;
2897 
2898 	memset(&time_cmd, 0, sizeof(time_cmd));
2899 
2900 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2901 	time_cmd.id_and_color =
2902 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2903 	time_cmd.id = htole32(sc->sc_time_event_uid);
2904 
2905 	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2906 		sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2907 
2908 	DELAY(100);
2909 }
2910 
2911 /*
2912  * NVM read access and content parsing.  We do not support
2913  * external NVM or writing NVM.
2914  */
2915 
2916 /* list of NVM sections we are allowed/need to read */
2917 const int iwm_nvm_to_read[] = {
2918 	IWM_NVM_SECTION_TYPE_HW,
2919 	IWM_NVM_SECTION_TYPE_SW,
2920 	IWM_NVM_SECTION_TYPE_REGULATORY,
2921 	IWM_NVM_SECTION_TYPE_CALIBRATION,
2922 	IWM_NVM_SECTION_TYPE_PRODUCTION,
2923 	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
2924 	IWM_NVM_SECTION_TYPE_HW_8000,
2925 	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2926 	IWM_NVM_SECTION_TYPE_PHY_SKU,
2927 };
2928 
2929 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
2930 
2931 #define IWM_NVM_WRITE_OPCODE 1
2932 #define IWM_NVM_READ_OPCODE 0
2933 
2934 int
2935 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2936     uint16_t length, uint8_t *data, uint16_t *len)
2937 {
2939 	struct iwm_nvm_access_cmd nvm_access_cmd = {
2940 		.offset = htole16(offset),
2941 		.length = htole16(length),
2942 		.type = htole16(section),
2943 		.op_code = IWM_NVM_READ_OPCODE,
2944 	};
2945 	struct iwm_nvm_access_resp *nvm_resp;
2946 	struct iwm_rx_packet *pkt;
2947 	struct iwm_host_cmd cmd = {
2948 		.id = IWM_NVM_ACCESS_CMD,
2949 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2950 		.resp_pkt_len = IWM_CMD_RESP_MAX,
2951 		.data = { &nvm_access_cmd, },
2952 	};
2953 	int err, offset_read;
2954 	size_t bytes_read;
2955 	uint8_t *resp_data;
2956 
2957 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2958 
2959 	err = iwm_send_cmd(sc, &cmd);
2960 	if (err)
2961 		return err;
2962 
2963 	pkt = cmd.resp_pkt;
2964 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2965 		err = EIO;
2966 		goto exit;
2967 	}
2968 
2969 	/* Extract NVM response */
2970 	nvm_resp = (void *)pkt->data;
2971 	if (nvm_resp == NULL)
2972 		return EIO;
2973 
2974 	err = le16toh(nvm_resp->status);
2975 	bytes_read = le16toh(nvm_resp->length);
2976 	offset_read = le16toh(nvm_resp->offset);
2977 	resp_data = nvm_resp->data;
2978 	if (err) {
2979 		err = EINVAL;
2980 		goto exit;
2981 	}
2982 
2983 	if (offset_read != offset) {
2984 		err = EINVAL;
2985 		goto exit;
2986 	}
2987 
2988 	if (bytes_read > length) {
2989 		err = EINVAL;
2990 		goto exit;
2991 	}
2992 
2993 	memcpy(data + offset, resp_data, bytes_read);
2994 	*len = bytes_read;
2995 
2996  exit:
2997 	iwm_free_resp(sc, &cmd);
2998 	return err;
2999 }
3000 
3001 /*
3002  * Reads an NVM section completely.
3003  * NICs prior to the 7000 family don't have a real NVM; they just read
3004  * section 0, which is the EEPROM. Because the EEPROM reading is unlimited
3005  * by uCode, we need to manually check in this case that we don't
3006  * overflow and try to read more than the EEPROM size.
3007  */
3008 int
3009 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
3010     uint16_t *len, size_t max_len)
3011 {
3012 	uint16_t chunklen, seglen;
3013 	int err = 0;
3014 
3015 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
3016 	*len = 0;
3017 
3018 	/* Read NVM chunks until exhausted (reading less than requested) */
3019 	while (seglen == chunklen && *len < max_len) {
3020 		err = iwm_nvm_read_chunk(sc,
3021 		    section, *len, chunklen, data, &seglen);
3022 		if (err)
3023 			return err;
3024 
3025 		*len += seglen;
3026 	}
3027 
3028 	return err;
3029 }
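
/*
 * The read loop above terminates in two ways: a chunk returned shorter
 * than IWM_NVM_DEFAULT_CHUNK_SIZE (2KB) marks the end of the section,
 * while the *len < max_len guard keeps an EEPROM-style device (which
 * serves "section" 0 without a length limit) from overflowing the
 * caller's buffer.
 */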
3030 
3031 uint8_t
3032 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3033 {
3034 	uint8_t tx_ant;
3035 
3036 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3037 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
3038 
3039 	if (sc->sc_nvm.valid_tx_ant)
3040 		tx_ant &= sc->sc_nvm.valid_tx_ant;
3041 
3042 	return tx_ant;
3043 }
3044 
3045 uint8_t
3046 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3047 {
3048 	uint8_t rx_ant;
3049 
3050 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3051 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
3052 
3053 	if (sc->sc_nvm.valid_rx_ant)
3054 		rx_ant &= sc->sc_nvm.valid_rx_ant;
3055 
3056 	return rx_ant;
3057 }
3058 
3059 int
3060 iwm_valid_siso_ant_rate_mask(struct iwm_softc *sc)
3061 {
3062 	uint8_t valid_tx_ant = iwm_fw_valid_tx_ant(sc);
3063 
3064 	/*
3065 	 * According to the Linux driver, antenna B should be preferred
3066 	 * on 9k devices since it is not shared with bluetooth. However,
3067 	 * there are 9k devices which do not support antenna B at all.
3068 	 */
3069 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
3070 	    (valid_tx_ant & IWM_ANT_B))
3071 		return IWM_RATE_MCS_ANT_B_MSK;
3072 
3073 	return IWM_RATE_MCS_ANT_A_MSK;
3074 }
3075 
3076 void
3077 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3078     const uint8_t *nvm_channels, int nchan)
3079 {
3080 	struct ieee80211com *ic = &sc->sc_ic;
3081 	struct iwm_nvm_data *data = &sc->sc_nvm;
3082 	int ch_idx;
3083 	struct ieee80211_channel *channel;
3084 	uint16_t ch_flags;
3085 	int is_5ghz;
3086 	int flags, hw_value;
3087 
3088 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
3089 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
3090 
3091 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
3092 		    !data->sku_cap_band_52GHz_enable)
3093 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
3094 
3095 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
3096 			continue;
3097 
3098 		hw_value = nvm_channels[ch_idx];
3099 		channel = &ic->ic_channels[hw_value];
3100 
3101 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
3102 		if (!is_5ghz) {
3103 			flags = IEEE80211_CHAN_2GHZ;
3104 			channel->ic_flags
3105 			    = IEEE80211_CHAN_CCK
3106 			    | IEEE80211_CHAN_OFDM
3107 			    | IEEE80211_CHAN_DYN
3108 			    | IEEE80211_CHAN_2GHZ;
3109 		} else {
3110 			flags = IEEE80211_CHAN_5GHZ;
3111 			channel->ic_flags =
3112 			    IEEE80211_CHAN_A;
3113 		}
3114 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3115 
3116 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
3117 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3118 
3119 		if (data->sku_cap_11n_enable) {
3120 			channel->ic_flags |= IEEE80211_CHAN_HT;
3121 			if (ch_flags & IWM_NVM_CHANNEL_40MHZ)
3122 				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3123 		}
3124 
3125 		if (is_5ghz && data->sku_cap_11ac_enable) {
3126 			channel->ic_flags |= IEEE80211_CHAN_VHT;
3127 			if (ch_flags & IWM_NVM_CHANNEL_80MHZ)
3128 				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3129 		}
3130 	}
3131 }
3132 
3133 int
3134 iwm_mimo_enabled(struct iwm_softc *sc)
3135 {
3136 	struct ieee80211com *ic = &sc->sc_ic;
3137 
3138 	return !sc->sc_nvm.sku_cap_mimo_disable &&
3139 	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3140 }
3141 
3142 void
3143 iwm_setup_ht_rates(struct iwm_softc *sc)
3144 {
3145 	struct ieee80211com *ic = &sc->sc_ic;
3146 	uint8_t rx_ant;
3147 
3148 	/* TX is supported with the same MCS as RX. */
3149 	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3150 
3151 	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3152 	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3153 
3154 	if (!iwm_mimo_enabled(sc))
3155 		return;
3156 
3157 	rx_ant = iwm_fw_valid_rx_ant(sc);
3158 	if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3159 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
3160 		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3161 }
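
/*
 * ic_sup_mcs[] is a bitmap with one bit per HT MCS: byte 0 covers the
 * single-stream rates MCS 0-7 and byte 1 the two-stream rates MCS 8-15.
 * The second byte is only set when two RX chains (antennas A+B or B+C)
 * are usable and MIMO has not been disabled.
 */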
3162 
3163 void
3164 iwm_setup_vht_rates(struct iwm_softc *sc)
3165 {
3166 	struct ieee80211com *ic = &sc->sc_ic;
3167 	uint8_t rx_ant = iwm_fw_valid_rx_ant(sc);
3168 	int n;
3169 
3170 	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3171 	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3172 
3173 	if (iwm_mimo_enabled(sc) &&
3174 	    ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
3175 	    (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)) {
3176 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3177 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3178 	} else {
3179 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3180 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3181 	}
3182 
3183 	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3184 		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3185 		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3186 	}
3187 
3188 	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3189 }
3190 
3191 void
3192 iwm_init_reorder_buffer(struct iwm_reorder_buffer *reorder_buf,
3193     uint16_t ssn, uint16_t buf_size)
3194 {
3195 	reorder_buf->head_sn = ssn;
3196 	reorder_buf->num_stored = 0;
3197 	reorder_buf->buf_size = buf_size;
3198 	reorder_buf->last_amsdu = 0;
3199 	reorder_buf->last_sub_index = 0;
3200 	reorder_buf->removed = 0;
3201 	reorder_buf->valid = 0;
3202 	reorder_buf->consec_oldsn_drops = 0;
3203 	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3204 	reorder_buf->consec_oldsn_prev_drop = 0;
3205 }
3206 
3207 void
3208 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3209 {
3210 	int i;
3211 	struct iwm_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3212 	struct iwm_reorder_buf_entry *entry;
3213 
3214 	for (i = 0; i < reorder_buf->buf_size; i++) {
3215 		entry = &rxba->entries[i];
3216 		ml_purge(&entry->frames);
3217 		timerclear(&entry->reorder_time);
3218 	}
3219 
3220 	reorder_buf->removed = 1;
3221 	timeout_del(&reorder_buf->reorder_timer);
3222 	timerclear(&rxba->last_rx);
3223 	timeout_del(&rxba->session_timer);
3224 	rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
3225 }
3226 
3227 #define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3228 
3229 void
3230 iwm_rx_ba_session_expired(void *arg)
3231 {
3232 	struct iwm_rxba_data *rxba = arg;
3233 	struct iwm_softc *sc = rxba->sc;
3234 	struct ieee80211com *ic = &sc->sc_ic;
3235 	struct ieee80211_node *ni = ic->ic_bss;
3236 	struct timeval now, timeout, expiry;
3237 	int s;
3238 
3239 	s = splnet();
3240 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3241 	    ic->ic_state == IEEE80211_S_RUN &&
3242 	    rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3243 		getmicrouptime(&now);
3244 		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3245 		timeradd(&rxba->last_rx, &timeout, &expiry);
3246 		if (timercmp(&now, &expiry, <)) {
3247 			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3248 		} else {
3249 			ic->ic_stats.is_ht_rx_ba_timeout++;
3250 			ieee80211_delba_request(ic, ni,
3251 			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3252 		}
3253 	}
3254 	splx(s);
3255 }
3256 
3257 void
3258 iwm_reorder_timer_expired(void *arg)
3259 {
3260 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3261 	struct iwm_reorder_buffer *buf = arg;
3262 	struct iwm_rxba_data *rxba = iwm_rxba_data_from_reorder_buf(buf);
3263 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
3264 	struct iwm_softc *sc = rxba->sc;
3265 	struct ieee80211com *ic = &sc->sc_ic;
3266 	struct ieee80211_node *ni = ic->ic_bss;
3267 	int i, s;
3268 	uint16_t sn = 0, index = 0;
3269 	int expired = 0;
3270 	int cont = 0;
3271 	struct timeval now, timeout, expiry;
3272 
3273 	if (!buf->num_stored || buf->removed)
3274 		return;
3275 
3276 	s = splnet();
3277 	getmicrouptime(&now);
3278 	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3279 
3280 	for (i = 0; i < buf->buf_size; i++) {
3281 		index = (buf->head_sn + i) % buf->buf_size;
3282 
3283 		if (ml_empty(&entries[index].frames)) {
3284 			/*
3285 			 * If there is a hole and the next frame didn't expire
3286 			 * we want to break and not advance SN.
3287 			 */
3288 			cont = 0;
3289 			continue;
3290 		}
3291 		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3292 		if (!cont && timercmp(&now, &expiry, <))
3293 			break;
3294 
3295 		expired = 1;
3296 		/* continue until next hole after this expired frame */
3297 		cont = 1;
3298 		sn = (buf->head_sn + (i + 1)) & 0xfff;
3299 	}
3300 
3301 	if (expired) {
3302 		/* SN is set to the last expired frame + 1 */
3303 		iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3304 		if_input(&sc->sc_ic.ic_if, &ml);
3305 		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3306 	} else {
3307 		/*
3308 		 * If no frame expired and there are stored frames, index is now
3309 		 * pointing to the first unexpired frame - modify reorder timeout
3310 		 * accordingly.
3311 		 */
3312 		timeout_add_usec(&buf->reorder_timer,
3313 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3314 	}
3315 
3316 	splx(s);
3317 }
3318 
3319 #define IWM_MAX_RX_BA_SESSIONS 16
3320 
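/*
 * Start or stop an RX Block Ack session by updating the firmware's
 * station entry.  On MQ-RX capable hardware, deaggregation is done in
 * hardware and the firmware returns a BAID which indexes the driver's
 * sc_rxba_data[] reorder state.
 */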
3321 int
3322 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3323     uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3324 {
3325 	struct ieee80211com *ic = &sc->sc_ic;
3326 	struct iwm_add_sta_cmd cmd;
3327 	struct iwm_node *in = (void *)ni;
3328 	int err, s;
3329 	uint32_t status;
3330 	size_t cmdsize;
3331 	struct iwm_rxba_data *rxba = NULL;
3332 	uint8_t baid = 0;
3333 
3334 	s = splnet();
3335 
3336 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3337 		ieee80211_addba_req_refuse(ic, ni, tid);
3338 		splx(s);
3339 		return 0;
3340 	}
3341 
3342 	memset(&cmd, 0, sizeof(cmd));
3343 
3344 	cmd.sta_id = IWM_STATION_ID;
3345 	cmd.mac_id_n_color
3346 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3347 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3348 
3349 	if (start) {
3350 		cmd.add_immediate_ba_tid = (uint8_t)tid;
3351 		cmd.add_immediate_ba_ssn = ssn;
3352 		cmd.rx_ba_window = winsize;
3353 	} else {
3354 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3355 	}
3356 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
3357 	    IWM_STA_MODIFY_REMOVE_BA_TID;
3358 
3359 	status = IWM_ADD_STA_SUCCESS;
3360 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3361 		cmdsize = sizeof(cmd);
3362 	else
3363 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3364 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3365 	    &status);
3366 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3367 		err = EIO;
3368 	if (err) {
3369 		if (start)
3370 			ieee80211_addba_req_refuse(ic, ni, tid);
3371 		splx(s);
3372 		return err;
3373 	}
3374 
3375 	if (sc->sc_mqrx_supported) {
3376 		/* Deaggregation is done in hardware. */
3377 		if (start) {
3378 			if (!(status & IWM_ADD_STA_BAID_VALID_MASK)) {
3379 				ieee80211_addba_req_refuse(ic, ni, tid);
3380 				splx(s);
3381 				return EIO;
3382 			}
3383 			baid = (status & IWM_ADD_STA_BAID_MASK) >>
3384 			    IWM_ADD_STA_BAID_SHIFT;
3385 			if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
3386 			    baid >= nitems(sc->sc_rxba_data)) {
3387 				ieee80211_addba_req_refuse(ic, ni, tid);
3388 				splx(s);
3389 				return EIO;
3390 			}
3391 			rxba = &sc->sc_rxba_data[baid];
3392 			if (rxba->baid != IWM_RX_REORDER_DATA_INVALID_BAID) {
3393 				ieee80211_addba_req_refuse(ic, ni, tid);
3394 				splx(s);
3395 				return 0;
3396 			}
3397 			rxba->sta_id = IWM_STATION_ID;
3398 			rxba->tid = tid;
3399 			rxba->baid = baid;
3400 			rxba->timeout = timeout_val;
3401 			getmicrouptime(&rxba->last_rx);
3402 			iwm_init_reorder_buffer(&rxba->reorder_buf, ssn,
3403 			    winsize);
3404 			if (timeout_val != 0) {
3405 				struct ieee80211_rx_ba *ba;
3406 				timeout_add_usec(&rxba->session_timer,
3407 				    timeout_val);
3408 				/* XXX disable net80211's BA timeout handler */
3409 				ba = &ni->ni_rx_ba[tid];
3410 				ba->ba_timeout_val = 0;
3411 			}
3412 		} else {
3413 			int i;
3414 			for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3415 				rxba = &sc->sc_rxba_data[i];
3416 				if (rxba->baid ==
3417 				    IWM_RX_REORDER_DATA_INVALID_BAID)
3418 					continue;
3419 				if (rxba->tid != tid)
3420 					continue;
3421 				iwm_clear_reorder_buffer(sc, rxba);
3422 				break;
3423 			}
3424 		}
3425 	}
3426 
3427 	if (start) {
3428 		sc->sc_rx_ba_sessions++;
3429 		ieee80211_addba_req_accept(ic, ni, tid);
3430 	} else if (sc->sc_rx_ba_sessions > 0)
3431 		sc->sc_rx_ba_sessions--;
3432 
3433 	splx(s);
3434 	return 0;
3435 }
3436 
3437 void
3438 iwm_mac_ctxt_task(void *arg)
3439 {
3440 	struct iwm_softc *sc = arg;
3441 	struct ieee80211com *ic = &sc->sc_ic;
3442 	struct iwm_node *in = (void *)ic->ic_bss;
3443 	int err, s = splnet();
3444 
3445 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3446 	    ic->ic_state != IEEE80211_S_RUN) {
3447 		refcnt_rele_wake(&sc->task_refs);
3448 		splx(s);
3449 		return;
3450 	}
3451 
3452 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3453 	if (err)
3454 		printf("%s: failed to update MAC\n", DEVNAME(sc));
3455 
3456 	iwm_unprotect_session(sc, in);
3457 
3458 	refcnt_rele_wake(&sc->task_refs);
3459 	splx(s);
3460 }
3461 
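/*
 * The ic_updateprot/ic_updateslot/ic_updateedca handlers below all
 * defer to mac_ctxt_task, which re-sends the MAC context command to
 * the firmware while in RUN state.  They do nothing if a new state
 * transition is already pending.
 */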
3462 void
3463 iwm_updateprot(struct ieee80211com *ic)
3464 {
3465 	struct iwm_softc *sc = ic->ic_softc;
3466 
3467 	if (ic->ic_state == IEEE80211_S_RUN &&
3468 	    !task_pending(&sc->newstate_task))
3469 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3470 }
3471 
3472 void
3473 iwm_updateslot(struct ieee80211com *ic)
3474 {
3475 	struct iwm_softc *sc = ic->ic_softc;
3476 
3477 	if (ic->ic_state == IEEE80211_S_RUN &&
3478 	    !task_pending(&sc->newstate_task))
3479 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3480 }
3481 
3482 void
3483 iwm_updateedca(struct ieee80211com *ic)
3484 {
3485 	struct iwm_softc *sc = ic->ic_softc;
3486 
3487 	if (ic->ic_state == IEEE80211_S_RUN &&
3488 	    !task_pending(&sc->newstate_task))
3489 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3490 }
3491 
3492 void
3493 iwm_phy_ctxt_task(void *arg)
3494 {
3495 	struct iwm_softc *sc = arg;
3496 	struct ieee80211com *ic = &sc->sc_ic;
3497 	struct iwm_node *in = (void *)ic->ic_bss;
3498 	struct ieee80211_node *ni = &in->in_ni;
3499 	uint8_t chains, sco, vht_chan_width;
3500 	int err, s = splnet();
3501 
3502 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3503 	    ic->ic_state != IEEE80211_S_RUN ||
3504 	    in->in_phyctxt == NULL) {
3505 		refcnt_rele_wake(&sc->task_refs);
3506 		splx(s);
3507 		return;
3508 	}
3509 
3510 	chains = iwm_mimo_enabled(sc) ? 2 : 1;
3511 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3512 	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3513 	    ieee80211_node_supports_ht_chan40(ni))
3514 		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3515 	else
3516 		sco = IEEE80211_HTOP0_SCO_SCN;
3517 	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3518 	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3519 	    ieee80211_node_supports_vht_chan80(ni))
3520 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3521 	else
3522 		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3523 	if (in->in_phyctxt->sco != sco ||
3524 	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3525 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3526 		    in->in_phyctxt->channel, chains, chains, 0, sco,
3527 		    vht_chan_width);
3528 		if (err)
3529 			printf("%s: failed to update PHY\n", DEVNAME(sc));
3530 		iwm_setrates(in, 0);
3531 	}
3532 
3533 	refcnt_rele_wake(&sc->task_refs);
3534 	splx(s);
3535 }
3536 
3537 void
3538 iwm_updatechan(struct ieee80211com *ic)
3539 {
3540 	struct iwm_softc *sc = ic->ic_softc;
3541 
3542 	if (ic->ic_state == IEEE80211_S_RUN &&
3543 	    !task_pending(&sc->newstate_task))
3544 		iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3545 }
3546 
3547 void
3548 iwm_updatedtim(struct ieee80211com *ic)
3549 {
3550 	struct iwm_softc *sc = ic->ic_softc;
3551 
3552 	if (ic->ic_state == IEEE80211_S_RUN &&
3553 	    !task_pending(&sc->newstate_task))
3554 		iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3555 }
3556 
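/*
 * Start or stop a Tx aggregation session for the given TID.  Each TID
 * maps onto a fixed aggregation Tx queue (IWM_FIRST_AGG_TX_QUEUE + tid);
 * the queue is enabled on demand and, when the session stops, is only
 * drained but kept enabled because disabling it here panics firmware.
 */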
3557 int
3558 iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3559     uint16_t ssn, uint16_t winsize, int start)
3560 {
3561 	struct iwm_add_sta_cmd cmd;
3562 	struct ieee80211com *ic = &sc->sc_ic;
3563 	struct iwm_node *in = (void *)ni;
3564 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3565 	struct iwm_tx_ring *ring;
3566 	enum ieee80211_edca_ac ac;
3567 	int fifo;
3568 	uint32_t status;
3569 	int err;
3570 	size_t cmdsize;
3571 
3572 	/* Ensure we can map this TID to an aggregation queue. */
3573 	if (tid >= IWM_MAX_TID_COUNT || qid > IWM_LAST_AGG_TX_QUEUE)
3574 		return ENOSPC;
3575 
3576 	if (start) {
3577 		if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3578 			return 0;
3579 	} else {
3580 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3581 			return 0;
3582 	}
3583 
3584 	ring = &sc->txq[qid];
3585 	ac = iwm_tid_to_ac[tid];
3586 	fifo = iwm_ac_to_tx_fifo[ac];
3587 
3588 	memset(&cmd, 0, sizeof(cmd));
3589 
3590 	cmd.sta_id = IWM_STATION_ID;
3591 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
3592 	    in->in_color));
3593 	cmd.add_modify = IWM_STA_MODE_MODIFY;
3594 
3595 	if (start) {
3596 		/* Enable Tx aggregation for this queue. */
3597 		in->tid_disable_ampdu &= ~(1 << tid);
3598 		in->tfd_queue_msk |= (1 << qid);
3599 	} else {
3600 		in->tid_disable_ampdu |= (1 << tid);
3601 		/*
3602 		 * Queue remains enabled in the TFD queue mask
3603 		 * until we leave RUN state.
3604 		 */
3605 		err = iwm_flush_sta(sc, in);
3606 		if (err)
3607 			return err;
3608 	}
3609 
3610 	cmd.tfd_queue_msk |= htole32(in->tfd_queue_msk);
3611 	cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
3612 	cmd.modify_mask = (IWM_STA_MODIFY_QUEUES |
3613 	    IWM_STA_MODIFY_TID_DISABLE_TX);
3614 
3615 	if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3616 		if (!iwm_nic_lock(sc)) {
3617 			if (start)
3618 				ieee80211_addba_resp_refuse(ic, ni, tid,
3619 				    IEEE80211_STATUS_UNSPECIFIED);
3620 			return EBUSY;
3621 		}
3622 		err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3623 		    ssn);
3624 		iwm_nic_unlock(sc);
3625 		if (err) {
3626 			printf("%s: could not enable Tx queue %d (error %d)\n",
3627 			    DEVNAME(sc), qid, err);
3628 			if (start)
3629 				ieee80211_addba_resp_refuse(ic, ni, tid,
3630 				    IEEE80211_STATUS_UNSPECIFIED);
3631 			return err;
3632 		}
3633 		/*
3634 		 * If iwm_enable_txq() employed the SCD hardware bug
3635 		 * workaround, we must skip the frame with seqnum SSN.
3636 		 */
3637 		if (ring->cur != IWM_AGG_SSN_TO_TXQ_IDX(ssn)) {
3638 			ssn = (ssn + 1) & 0xfff;
3639 			KASSERT(ring->cur == IWM_AGG_SSN_TO_TXQ_IDX(ssn));
3640 			ieee80211_output_ba_move_window(ic, ni, tid, ssn);
3641 			ni->ni_qos_txseqs[tid] = ssn;
3642 		}
3643 	}
3644 
3645 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3646 		cmdsize = sizeof(cmd);
3647 	else
3648 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
3649 
3650 	status = 0;
3651 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3652 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
3653 		err = EIO;
3654 	if (err) {
3655 		printf("%s: could not update sta (error %d)\n",
3656 		    DEVNAME(sc), err);
3657 		if (start)
3658 			ieee80211_addba_resp_refuse(ic, ni, tid,
3659 			    IEEE80211_STATUS_UNSPECIFIED);
3660 		return err;
3661 	}
3662 
3663 	if (start) {
3664 		sc->tx_ba_queue_mask |= (1 << qid);
3665 		ieee80211_addba_resp_accept(ic, ni, tid);
3666 	} else {
3667 		sc->tx_ba_queue_mask &= ~(1 << qid);
3668 
3669 		/*
3670 		 * Clear pending frames but keep the queue enabled.
3671 		 * Firmware panics if we disable the queue here.
3672 		 */
3673 		iwm_txq_advance(sc, ring, ring->cur);
3674 		iwm_clear_oactive(sc, ring);
3675 	}
3676 
3677 	return 0;
3678 }
3679 
3680 void
3681 iwm_ba_task(void *arg)
3682 {
3683 	struct iwm_softc *sc = arg;
3684 	struct ieee80211com *ic = &sc->sc_ic;
3685 	struct ieee80211_node *ni = ic->ic_bss;
3686 	int s = splnet();
3687 	int tid, err = 0;
3688 
3689 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3690 	    ic->ic_state != IEEE80211_S_RUN) {
3691 		refcnt_rele_wake(&sc->task_refs);
3692 		splx(s);
3693 		return;
3694 	}
3695 
3696 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3697 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3698 			break;
3699 		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3700 			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3701 			err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3702 			    ba->ba_winsize, ba->ba_timeout_val, 1);
3703 			sc->ba_rx.start_tidmask &= ~(1 << tid);
3704 		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3705 			err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3706 			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3707 		}
3708 	}
3709 
3710 	for (tid = 0; tid < IWM_MAX_TID_COUNT && !err; tid++) {
3711 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3712 			break;
3713 		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3714 			struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3715 			err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3716 			    ba->ba_winsize, 1);
3717 			sc->ba_tx.start_tidmask &= ~(1 << tid);
3718 		} else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3719 			err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3720 			sc->ba_tx.stop_tidmask &= ~(1 << tid);
3721 		}
3722 	}
3723 
3724 	/*
3725 	 * We "recover" from failure to start or stop a BA session
3726 	 * by resetting the device.
3727 	 */
3728 	if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3729 		task_add(systq, &sc->init_task);
3730 
3731 	refcnt_rele_wake(&sc->task_refs);
3732 	splx(s);
3733 }
3734 
3735 /*
3736  * This function is called by the upper layer when an ADDBA request is
3737  * received from another STA, before the ADDBA response is sent.
3738  */
3739 int
3740 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3741     uint8_t tid)
3742 {
3743 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3744 
3745 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3746 	    tid > IWM_MAX_TID_COUNT)
3747 		return ENOSPC;
3748 
3749 	if (sc->ba_rx.start_tidmask & (1 << tid))
3750 		return EBUSY;
3751 
3752 	sc->ba_rx.start_tidmask |= (1 << tid);
3753 	iwm_add_task(sc, systq, &sc->ba_task);
3754 
3755 	return EBUSY;
3756 }
3757 
3758 /*
3759  * This function is called by upper layer on teardown of an HT-immediate
3760  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
3761  */
3762 void
3763 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3764     uint8_t tid)
3765 {
3766 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3767 
3768 	if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3769 		return;
3770 
3771 	sc->ba_rx.stop_tidmask |= (1 << tid);
3772 	iwm_add_task(sc, systq, &sc->ba_task);
3773 }
3774 
3775 int
3776 iwm_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3777     uint8_t tid)
3778 {
3779 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3780 	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3781 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3782 
3783 	/* We only implement Tx aggregation with DQA-capable firmware. */
3784 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3785 		return ENOTSUP;
3786 
3787 	/* Ensure we can map this TID to an aggregation queue. */
3788 	if (tid >= IWM_MAX_TID_COUNT)
3789 		return EINVAL;
3790 
3791 	/* We only support a fixed Tx aggregation window size, for now. */
3792 	if (ba->ba_winsize != IWM_FRAME_LIMIT)
3793 		return ENOTSUP;
3794 
3795 	/* Is firmware already using Tx aggregation on this queue? */
3796 	if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3797 		return ENOSPC;
3798 
3799 	/* Are we already processing an ADDBA request? */
3800 	if (sc->ba_tx.start_tidmask & (1 << tid))
3801 		return EBUSY;
3802 
3803 	sc->ba_tx.start_tidmask |= (1 << tid);
3804 	iwm_add_task(sc, systq, &sc->ba_task);
3805 
3806 	return EBUSY;
3807 }
3808 
3809 void
3810 iwm_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3811     uint8_t tid)
3812 {
3813 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3814 	int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
3815 
3816 	if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3817 		return;
3818 
3819 	/* Is firmware currently using Tx aggregation on this queue? */
3820 	if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3821 		return;
3822 
3823 	sc->ba_tx.stop_tidmask |= (1 << tid);
3824 	iwm_add_task(sc, systq, &sc->ba_task);
3825 }
3826 
3827 void
3828 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3829     const uint16_t *mac_override, const uint16_t *nvm_hw)
3830 {
3831 	const uint8_t *hw_addr;
3832 
3833 	if (mac_override) {
3834 		static const uint8_t reserved_mac[] = {
3835 			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3836 		};
3837 
3838 		hw_addr = (const uint8_t *)(mac_override +
3839 				 IWM_MAC_ADDRESS_OVERRIDE_8000);
3840 
3841 		/*
3842 		 * Store the MAC address from the MAO (MAC address override)
3843 		 * section.  No byte swapping is required in the MAO section.
3844 		 */
3845 		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3846 
3847 		/*
3848 		 * Force the use of the OTP MAC address in case of reserved MAC
3849 		 * address in the NVM, or if address is given but invalid.
3850 		 */
3851 		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3852 		    (memcmp(etherbroadcastaddr, data->hw_addr,
3853 		    sizeof(etherbroadcastaddr)) != 0) &&
3854 		    (memcmp(etheranyaddr, data->hw_addr,
3855 		    sizeof(etheranyaddr)) != 0) &&
3856 		    !ETHER_IS_MULTICAST(data->hw_addr))
3857 			return;
3858 	}
3859 
3860 	if (nvm_hw) {
3861 		/* Read the mac address from WFMP registers. */
3862 		uint32_t mac_addr0, mac_addr1;
3863 
3864 		if (!iwm_nic_lock(sc))
3865 			goto out;
3866 		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3867 		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3868 		iwm_nic_unlock(sc);
3869 
3870 		hw_addr = (const uint8_t *)&mac_addr0;
3871 		data->hw_addr[0] = hw_addr[3];
3872 		data->hw_addr[1] = hw_addr[2];
3873 		data->hw_addr[2] = hw_addr[1];
3874 		data->hw_addr[3] = hw_addr[0];
3875 
3876 		hw_addr = (const uint8_t *)&mac_addr1;
3877 		data->hw_addr[4] = hw_addr[1];
3878 		data->hw_addr[5] = hw_addr[0];
3879 
3880 		return;
3881 	}
3882 out:
3883 	printf("%s: mac address not found\n", DEVNAME(sc));
3884 	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3885 }
3886 
3887 int
3888 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3889     const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3890     const uint16_t *mac_override, const uint16_t *phy_sku,
3891     const uint16_t *regulatory, int n_regulatory)
3892 {
3893 	struct iwm_nvm_data *data = &sc->sc_nvm;
3894 	uint8_t hw_addr[ETHER_ADDR_LEN];
3895 	uint32_t sku;
3896 	uint16_t lar_config;
3897 
3898 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3899 
3900 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3901 		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3902 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3903 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3904 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3905 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3906 
3907 		sku = le16_to_cpup(nvm_sw + IWM_SKU);
3908 	} else {
3909 		uint32_t radio_cfg =
3910 		    le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
3911 		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3912 		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3913 		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3914 		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3915 		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3916 		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3917 
3918 		sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
3919 	}
3920 
3921 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3922 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3923 	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3924 	data->sku_cap_11ac_enable = sku & IWM_NVM_SKU_CAP_11AC_ENABLE;
3925 	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3926 
3927 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3928 		uint16_t lar_offset = data->nvm_version < 0xE39 ?
3929 				       IWM_NVM_LAR_OFFSET_8000_OLD :
3930 				       IWM_NVM_LAR_OFFSET_8000;
3931 
3932 		lar_config = le16_to_cpup(regulatory + lar_offset);
3933 		data->lar_enabled = !!(lar_config &
3934 				       IWM_NVM_LAR_ENABLED_8000);
3935 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
3936 	} else
3937 		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3938 
3939 
3940 	/* Stored as little-endian 16-bit words: bytes appear in 214365 order. */
3941 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3942 		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3943 		data->hw_addr[0] = hw_addr[1];
3944 		data->hw_addr[1] = hw_addr[0];
3945 		data->hw_addr[2] = hw_addr[3];
3946 		data->hw_addr[3] = hw_addr[2];
3947 		data->hw_addr[4] = hw_addr[5];
3948 		data->hw_addr[5] = hw_addr[4];
3949 	} else
3950 		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3951 
3952 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3953 		if (sc->nvm_type == IWM_NVM_SDP) {
3954 			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3955 			    MIN(n_regulatory, nitems(iwm_nvm_channels)));
3956 		} else {
3957 			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3958 			    iwm_nvm_channels, nitems(iwm_nvm_channels));
3959 		}
3960 	} else
3961 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3962 		    iwm_nvm_channels_8000,
3963 		    MIN(n_regulatory, nitems(iwm_nvm_channels_8000)));
3964 
3965 	data->calib_version = 255;   /* TODO:
3966 					this value prevents some checks from
3967 					failing; we need to check whether this
3968 					field is still needed, and if so,
3969 					where it lives in the NVM */
3970 
3971 	return 0;
3972 }
3973 
3974 int
3975 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3976 {
3977 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3978 	const uint16_t *regulatory = NULL;
3979 	int n_regulatory = 0;
3980 
3981 	/* Check for required sections. */
3982 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3983 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3984 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3985 			return ENOENT;
3986 		}
3987 
3988 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3989 
3990 		if (sc->nvm_type == IWM_NVM_SDP) {
3991 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
3992 				return ENOENT;
3993 			regulatory = (const uint16_t *)
3994 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
3995 			n_regulatory =
3996 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length;
3997 		}
3998 	} else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3999 		/* SW and REGULATORY sections are mandatory */
4000 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
4001 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
4002 			return ENOENT;
4003 		}
4004 		/* MAC_OVERRIDE or at least HW section must exist */
4005 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
4006 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
4007 			return ENOENT;
4008 		}
4009 
4010 		/* PHY_SKU section is mandatory in B0 */
4011 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
4012 			return ENOENT;
4013 		}
4014 
4015 		regulatory = (const uint16_t *)
4016 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
4017 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
4018 		hw = (const uint16_t *)
4019 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
4020 		mac_override =
4021 			(const uint16_t *)
4022 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
4023 		phy_sku = (const uint16_t *)
4024 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
4025 	} else {
4026 		panic("unknown device family %d", sc->sc_device_family);
4027 	}
4028 
4029 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
4030 	calib = (const uint16_t *)
4031 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
4032 
4033 	/* XXX should pass in the length of every section */
4034 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
4035 	    phy_sku, regulatory, n_regulatory);
4036 }
4037 
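/*
 * Read all NVM sections listed in iwm_nvm_to_read[] into temporary
 * buffers and hand them to iwm_parse_nvm_sections().  Sections which
 * fail to read are simply skipped.
 */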
4038 int
4039 iwm_nvm_init(struct iwm_softc *sc)
4040 {
4041 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
4042 	int i, section, err;
4043 	uint16_t len;
4044 	uint8_t *buf;
4045 	const size_t bufsz = sc->sc_nvm_max_section_size;
4046 
4047 	memset(nvm_sections, 0, sizeof(nvm_sections));
4048 
4049 	buf = malloc(bufsz, M_DEVBUF, M_WAIT);
4050 	if (buf == NULL)
4051 		return ENOMEM;
4052 
4053 	for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
4054 		section = iwm_nvm_to_read[i];
4055 		KASSERT(section <= nitems(nvm_sections));
4056 
4057 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
4058 		if (err) {
4059 			err = 0;
4060 			continue;
4061 		}
4062 		nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
4063 		if (nvm_sections[section].data == NULL) {
4064 			err = ENOMEM;
4065 			break;
4066 		}
4067 		memcpy(nvm_sections[section].data, buf, len);
4068 		nvm_sections[section].length = len;
4069 	}
4070 	free(buf, M_DEVBUF, bufsz);
4071 	if (err == 0)
4072 		err = iwm_parse_nvm_sections(sc, nvm_sections);
4073 
4074 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
4075 		if (nvm_sections[i].data != NULL)
4076 			free(nvm_sections[i].data, M_DEVBUF,
4077 			    nvm_sections[i].length);
4078 	}
4079 
4080 	return err;
4081 }
4082 
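/*
 * Upload one firmware section to the device, split into chunks of at
 * most IWM_FH_MEM_TB_MAX_LENGTH bytes each.
 */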
4083 int
4084 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
4085     const uint8_t *section, uint32_t byte_cnt)
4086 {
4087 	int err = EINVAL;
4088 	uint32_t chunk_sz, offset;
4089 
4090 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
4091 
4092 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
4093 		uint32_t addr, len;
4094 		const uint8_t *data;
4095 
4096 		addr = dst_addr + offset;
4097 		len = MIN(chunk_sz, byte_cnt - offset);
4098 		data = section + offset;
4099 
4100 		err = iwm_firmware_load_chunk(sc, addr, data, len);
4101 		if (err)
4102 			break;
4103 	}
4104 
4105 	return err;
4106 }
4107 
4108 int
4109 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4110     const uint8_t *chunk, uint32_t byte_cnt)
4111 {
4112 	struct iwm_dma_info *dma = &sc->fw_dma;
4113 	int err;
4114 
4115 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
4116 	memcpy(dma->vaddr, chunk, byte_cnt);
4117 	bus_dmamap_sync(sc->sc_dmat,
4118 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
4119 
4120 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4121 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4122 		err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4123 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4124 		if (err)
4125 			return err;
4126 	}
4127 
4128 	sc->sc_fw_chunk_done = 0;
4129 
4130 	if (!iwm_nic_lock(sc))
4131 		return EBUSY;
4132 
4133 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4134 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4135 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4136 	    dst_addr);
4137 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4138 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4139 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4140 	    (iwm_get_dma_hi_addr(dma->paddr)
4141 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4142 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4143 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4144 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4145 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4146 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4147 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
4148 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4149 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4150 
4151 	iwm_nic_unlock(sc);
4152 
4153 	/* Wait for this segment to load. */
4154 	err = 0;
4155 	while (!sc->sc_fw_chunk_done) {
4156 		err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4157 		if (err)
4158 			break;
4159 	}
4160 
4161 	if (!sc->sc_fw_chunk_done)
4162 		printf("%s: fw chunk addr 0x%x len %d failed to load\n",
4163 		    DEVNAME(sc), dst_addr, byte_cnt);
4164 
4165 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
4166 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
4167 		int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
4168 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
4169 		if (!err)
4170 			err = err2;
4171 	}
4172 
4173 	return err;
4174 }
4175 
4176 int
4177 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4178 {
4179 	struct iwm_fw_sects *fws;
4180 	int err, i;
4181 	void *data;
4182 	uint32_t dlen;
4183 	uint32_t offset;
4184 
4185 	fws = &sc->sc_fw.fw_sects[ucode_type];
4186 	for (i = 0; i < fws->fw_count; i++) {
4187 		data = fws->fw_sect[i].fws_data;
4188 		dlen = fws->fw_sect[i].fws_len;
4189 		offset = fws->fw_sect[i].fws_devoff;
4190 		if (dlen > sc->sc_fwdmasegsz) {
4191 			err = EFBIG;
4192 		} else
4193 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4194 		if (err) {
4195 			printf("%s: could not load firmware chunk %u of %u\n",
4196 			    DEVNAME(sc), i, fws->fw_count);
4197 			return err;
4198 		}
4199 	}
4200 
4201 	iwm_enable_interrupts(sc);
4202 
4203 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
4204 
4205 	return 0;
4206 }
4207 
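/*
 * Family 8000 firmware images contain sections for two CPUs, divided
 * by separator sections.  After each uploaded section the driver
 * reports progress through the IWM_FH_UCODE_LOAD_STATUS register;
 * 0xFFFF (CPU1) or 0xFFFFFFFF (CPU2) marks that CPU's sections as
 * complete.
 */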
4208 int
4209 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4210     int cpu, int *first_ucode_section)
4211 {
4212 	int shift_param;
4213 	int i, err = 0, sec_num = 0x1;
4214 	uint32_t val, last_read_idx = 0;
4215 	void *data;
4216 	uint32_t dlen;
4217 	uint32_t offset;
4218 
4219 	if (cpu == 1) {
4220 		shift_param = 0;
4221 		*first_ucode_section = 0;
4222 	} else {
4223 		shift_param = 16;
4224 		(*first_ucode_section)++;
4225 	}
4226 
4227 	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
4228 		last_read_idx = i;
4229 		data = fws->fw_sect[i].fws_data;
4230 		dlen = fws->fw_sect[i].fws_len;
4231 		offset = fws->fw_sect[i].fws_devoff;
4232 
4233 		/*
4234 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
4235 		 * CPU1 sections from the CPU2 sections.
4236 		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
4237 		 * non-paged sections from the CPU2 paging section.
4238 		 */
4239 		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
4240 		    offset == IWM_PAGING_SEPARATOR_SECTION)
4241 			break;
4242 
4243 		if (dlen > sc->sc_fwdmasegsz) {
4244 			err = EFBIG;
4245 		} else
4246 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
4247 		if (err) {
4248 			printf("%s: could not load firmware chunk %d "
4249 			    "(error %d)\n", DEVNAME(sc), i, err);
4250 			return err;
4251 		}
4252 
4253 		/* Notify the ucode of the loaded section number and status */
4254 		if (iwm_nic_lock(sc)) {
4255 			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4256 			val = val | (sec_num << shift_param);
4257 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4258 			sec_num = (sec_num << 1) | 0x1;
4259 			iwm_nic_unlock(sc);
4260 		} else {
4261 			err = EBUSY;
4262 			printf("%s: could not load firmware chunk %d "
4263 			    "(error %d)\n", DEVNAME(sc), i, err);
4264 			return err;
4265 		}
4266 	}
4267 
4268 	*first_ucode_section = last_read_idx;
4269 
4270 	if (iwm_nic_lock(sc)) {
4271 		if (cpu == 1)
4272 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4273 		else
4274 			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4275 		iwm_nic_unlock(sc);
4276 	} else {
4277 		err = EBUSY;
4278 		printf("%s: could not finalize firmware loading (error %d)\n",
4279 		    DEVNAME(sc), err);
4280 		return err;
4281 	}
4282 
4283 	return 0;
4284 }
4285 
4286 int
4287 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4288 {
4289 	struct iwm_fw_sects *fws;
4290 	int err = 0;
4291 	int first_ucode_section;
4292 
4293 	fws = &sc->sc_fw.fw_sects[ucode_type];
4294 
4295 	/* Configure the ucode to be ready to get the secured image:
4296 	 * release the CPU reset. */
4297 	if (iwm_nic_lock(sc)) {
4298 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4299 		    IWM_RELEASE_CPU_RESET_BIT);
4300 		iwm_nic_unlock(sc);
4301 	}
4302 
4303 	/* load to FW the binary Secured sections of CPU1 */
4304 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4305 	if (err)
4306 		return err;
4307 
4308 	/* load to FW the binary sections of CPU2 */
4309 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4310 	if (err)
4311 		return err;
4312 
4313 	iwm_enable_interrupts(sc);
4314 	return 0;
4315 }
4316 
4317 int
4318 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4319 {
4320 	int err;
4321 
4322 	splassert(IPL_NET);
4323 
4324 	sc->sc_uc.uc_intr = 0;
4325 	sc->sc_uc.uc_ok = 0;
4326 
4327 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4328 		err = iwm_load_firmware_8000(sc, ucode_type);
4329 	else
4330 		err = iwm_load_firmware_7000(sc, ucode_type);
4331 
4332 	if (err)
4333 		return err;
4334 
4335 	/* wait for the firmware to load */
4336 	err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4337 	if (err || !sc->sc_uc.uc_ok)
4338 		printf("%s: could not load firmware\n", DEVNAME(sc));
4339 
4340 	return err;
4341 }
4342 
4343 int
4344 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4345 {
4346 	int err;
4347 
4348 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4349 
4350 	err = iwm_nic_init(sc);
4351 	if (err) {
4352 		printf("%s: unable to init nic\n", DEVNAME(sc));
4353 		return err;
4354 	}
4355 
4356 	/* make sure rfkill handshake bits are cleared */
4357 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4358 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4359 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4360 
4361 	/* clear (again), then enable firmware load interrupt */
4362 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
4363 	iwm_enable_fwload_interrupt(sc);
4364 
4365 	/* really make sure rfkill handshake bits are cleared */
4366 	/* maybe we should write a few times more?  just to make sure */
4367 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4368 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4369 
4370 	return iwm_load_firmware(sc, ucode_type);
4371 }
4372 
4373 int
4374 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4375 {
4376 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
4377 		.valid = htole32(valid_tx_ant),
4378 	};
4379 
4380 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4381 	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4382 }
4383 
4384 int
4385 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4386 {
4387 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
4388 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4389 
4390 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |
4391 	    sc->sc_extra_phy_config);
4392 	phy_cfg_cmd.calib_control.event_trigger =
4393 	    sc->sc_default_calib[ucode_type].event_trigger;
4394 	phy_cfg_cmd.calib_control.flow_trigger =
4395 	    sc->sc_default_calib[ucode_type].flow_trigger;
4396 
4397 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4398 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4399 }
4400 
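/*
 * Tell DQA (dynamic queue allocation) capable firmware which queue
 * the driver will use as its command queue.
 */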
4401 int
4402 iwm_send_dqa_cmd(struct iwm_softc *sc)
4403 {
4404 	struct iwm_dqa_enable_cmd dqa_cmd = {
4405 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
4406 	};
4407 	uint32_t cmd_id;
4408 
4409 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
4410 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4411 }
4412 
4413 int
4414 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4415 	enum iwm_ucode_type ucode_type)
4416 {
4417 	enum iwm_ucode_type old_type = sc->sc_uc_current;
4418 	struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4419 	int err;
4420 
4421 	err = iwm_read_firmware(sc);
4422 	if (err)
4423 		return err;
4424 
4425 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4426 		sc->cmdqid = IWM_DQA_CMD_QUEUE;
4427 	else
4428 		sc->cmdqid = IWM_CMD_QUEUE;
4429 
4430 	sc->sc_uc_current = ucode_type;
4431 	err = iwm_start_fw(sc, ucode_type);
4432 	if (err) {
4433 		sc->sc_uc_current = old_type;
4434 		return err;
4435 	}
4436 
4437 	err = iwm_post_alive(sc);
4438 	if (err)
4439 		return err;
4440 
4441 	/*
4442 	 * Configure and operate the fw paging mechanism.
4443 	 * The driver configures the paging flow only once; the CPU2 paging
4444 	 * image is included in the IWM_UCODE_INIT image.
4445 	 */
4446 	if (fw->paging_mem_size) {
4447 		err = iwm_save_fw_paging(sc, fw);
4448 		if (err) {
4449 			printf("%s: failed to save the FW paging image\n",
4450 			    DEVNAME(sc));
4451 			return err;
4452 		}
4453 
4454 		err = iwm_send_paging_cmd(sc, fw);
4455 		if (err) {
4456 			printf("%s: failed to send the paging cmd\n",
4457 			    DEVNAME(sc));
4458 			iwm_free_fw_paging(sc);
4459 			return err;
4460 		}
4461 	}
4462 
4463 	return 0;
4464 }
4465 
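/*
 * Run the INIT firmware image.  With justnvm set, only the NVM is read
 * to obtain the MAC address; otherwise calibration is triggered and we
 * sleep until both IWM_INIT_COMPLETE and IWM_CALIB_COMPLETE have been
 * signalled by the firmware.
 */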
4466 int
4467 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4468 {
4469 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
4470 	int err, s;
4471 
4472 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4473 		printf("%s: radio is disabled by hardware switch\n",
4474 		    DEVNAME(sc));
4475 		return EPERM;
4476 	}
4477 
4478 	s = splnet();
4479 	sc->sc_init_complete = 0;
4480 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4481 	if (err) {
4482 		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4483 		splx(s);
4484 		return err;
4485 	}
4486 
4487 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4488 		err = iwm_send_bt_init_conf(sc);
4489 		if (err) {
4490 			printf("%s: could not init bt coex (error %d)\n",
4491 			    DEVNAME(sc), err);
4492 			splx(s);
4493 			return err;
4494 		}
4495 	}
4496 
4497 	if (justnvm) {
4498 		err = iwm_nvm_init(sc);
4499 		if (err) {
4500 			printf("%s: failed to read nvm\n", DEVNAME(sc));
4501 			splx(s);
4502 			return err;
4503 		}
4504 
4505 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4506 			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4507 			    sc->sc_nvm.hw_addr);
4508 
4509 		splx(s);
4510 		return 0;
4511 	}
4512 
4513 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4514 	if (err) {
4515 		splx(s);
4516 		return err;
4517 	}
4518 
4519 	/* Send TX valid antennas before triggering calibrations */
4520 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4521 	if (err) {
4522 		splx(s);
4523 		return err;
4524 	}
4525 
4526 	/*
4527 	 * Send the phy configuration command to the init uCode
4528 	 * to start the 16.0 uCode init image internal calibrations.
4529 	 */
4530 	err = iwm_send_phy_cfg_cmd(sc);
4531 	if (err) {
4532 		splx(s);
4533 		return err;
4534 	}
4535 
4536 	/*
4537 	 * Nothing to do but wait for the init complete and phy DB
4538 	 * notifications from the firmware.
4539 	 */
4540 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4541 		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4542 		    SEC_TO_NSEC(2));
4543 		if (err)
4544 			break;
4545 	}
4546 
4547 	splx(s);
4548 	return err;
4549 }
4550 
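/*
 * Enable LTR (PCIe Latency Tolerance Reporting) in the firmware,
 * provided platform support was detected earlier (sc_ltr_enabled).
 */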
4551 int
4552 iwm_config_ltr(struct iwm_softc *sc)
4553 {
4554 	struct iwm_ltr_config_cmd cmd = {
4555 		.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
4556 	};
4557 
4558 	if (!sc->sc_ltr_enabled)
4559 		return 0;
4560 
4561 	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4562 }
4563 
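/*
 * Attach a fresh mbuf cluster to RX ring slot 'idx' and write its DMA
 * address into the matching RX descriptor.  Legacy (non-MQ) descriptors
 * store the address shifted right by 8 bits, which implies 256-byte
 * aligned receive buffers.
 */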
4564 int
4565 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4566 {
4567 	struct iwm_rx_ring *ring = &sc->rxq;
4568 	struct iwm_rx_data *data = &ring->data[idx];
4569 	struct mbuf *m;
4570 	int err;
4571 	int fatal = 0;
4572 
4573 	m = m_gethdr(M_DONTWAIT, MT_DATA);
4574 	if (m == NULL)
4575 		return ENOBUFS;
4576 
4577 	if (size <= MCLBYTES) {
4578 		MCLGET(m, M_DONTWAIT);
4579 	} else {
4580 		MCLGETL(m, M_DONTWAIT, IWM_RBUF_SIZE);
4581 	}
4582 	if ((m->m_flags & M_EXT) == 0) {
4583 		m_freem(m);
4584 		return ENOBUFS;
4585 	}
4586 
4587 	if (data->m != NULL) {
4588 		bus_dmamap_unload(sc->sc_dmat, data->map);
4589 		fatal = 1;
4590 	}
4591 
4592 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4593 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4594 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4595 	if (err) {
4596 		/* XXX */
4597 		if (fatal)
4598 			panic("iwm: could not load RX mbuf");
4599 		m_freem(m);
4600 		return err;
4601 	}
4602 	data->m = m;
4603 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4604 
4605 	/* Update RX descriptor. */
4606 	if (sc->sc_mqrx_supported) {
4607 		((uint64_t *)ring->desc)[idx] =
4608 		    htole64(data->map->dm_segs[0].ds_addr);
4609 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4610 		    idx * sizeof(uint64_t), sizeof(uint64_t),
4611 		    BUS_DMASYNC_PREWRITE);
4612 	} else {
4613 		((uint32_t *)ring->desc)[idx] =
4614 		    htole32(data->map->dm_segs[0].ds_addr >> 8);
4615 		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4616 		    idx * sizeof(uint32_t), sizeof(uint32_t),
4617 		    BUS_DMASYNC_PREWRITE);
4618 	}
4619 
4620 	return 0;
4621 }
4622 
4623 /*
4624  * RSSI values are reported by the FW as positive values; negate them
4625  * to obtain dBm.  Account for missing antennas by replacing 0 values
4626  * with -256 dBm: practically zero power and an infeasible 8-bit value.
4627  */
4628 int
4629 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4630 {
4631 	int energy_a, energy_b, energy_c, max_energy;
4632 	uint32_t val;
4633 
4634 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
4635 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
4636 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
4637 	energy_a = energy_a ? -energy_a : -256;
4638 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
4639 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
4640 	energy_b = energy_b ? -energy_b : -256;
4641 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
4642 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
4643 	energy_c = energy_c ? -energy_c : -256;
4644 	max_energy = MAX(energy_a, energy_b);
4645 	max_energy = MAX(max_energy, energy_c);
4646 
4647 	return max_energy;
4648 }
4649 
4650 int
4651 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4652     struct iwm_rx_mpdu_desc *desc)
4653 {
4654 	int energy_a, energy_b;
4655 
4656 	energy_a = desc->v1.energy_a;
4657 	energy_b = desc->v1.energy_b;
4658 	energy_a = energy_a ? -energy_a : -256;
4659 	energy_b = energy_b ? -energy_b : -256;
4660 	return MAX(energy_a, energy_b);
4661 }
4662 
4663 void
4664 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4665     struct iwm_rx_data *data)
4666 {
4667 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
4668 
4669 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4670 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4671 
4672 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4673 }
4674 
4675 /*
4676  * Retrieve the average noise (in dBm) among receivers.
4677  */
4678 int
4679 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
4680 {
4681 	int i, total, nbant, noise;
4682 
4683 	total = nbant = noise = 0;
4684 	for (i = 0; i < 3; i++) {
4685 		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4686 		if (noise) {
4687 			total += noise;
4688 			nbant++;
4689 		}
4690 	}
4691 
4692 	/* There should be at least one antenna but check anyway. */
4693 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4694 }
4695 
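/*
 * Software replay check for frames decrypted by the hardware:
 * reassemble the 48-bit CCMP packet number from the 8-byte CCMP header
 * (PN0, PN1, reserved, key/ExtIV, PN2..PN5) and compare it with the
 * last PN accepted for this TID.
 */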
4696 int
4697 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4698     struct ieee80211_rxinfo *rxi)
4699 {
4700 	struct ieee80211com *ic = &sc->sc_ic;
4701 	struct ieee80211_key *k = &ni->ni_pairwise_key;
4702 	struct ieee80211_frame *wh;
4703 	uint64_t pn, *prsc;
4704 	uint8_t *ivp;
4705 	uint8_t tid;
4706 	int hdrlen, hasqos;
4707 
4708 	wh = mtod(m, struct ieee80211_frame *);
4709 	hdrlen = ieee80211_get_hdrlen(wh);
4710 	ivp = (uint8_t *)wh + hdrlen;
4711 
4712 	/* Check that ExtIV bit is set. */
4713 	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4714 		return 1;
4715 
4716 	hasqos = ieee80211_has_qos(wh);
4717 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4718 	prsc = &k->k_rsc[tid];
4719 
4720 	/* Extract the 48-bit PN from the CCMP header. */
4721 	pn = (uint64_t)ivp[0]       |
4722 	     (uint64_t)ivp[1] <<  8 |
4723 	     (uint64_t)ivp[4] << 16 |
4724 	     (uint64_t)ivp[5] << 24 |
4725 	     (uint64_t)ivp[6] << 32 |
4726 	     (uint64_t)ivp[7] << 40;
4727 	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4728 		if (pn < *prsc) {
4729 			ic->ic_stats.is_ccmp_replays++;
4730 			return 1;
4731 		}
4732 	} else if (pn <= *prsc) {
4733 		ic->ic_stats.is_ccmp_replays++;
4734 		return 1;
4735 	}
4736 	/* Last seen packet number is updated in ieee80211_inputm(). */
4737 
4738 	/*
4739 	 * Some firmware versions strip the MIC, and some don't. It is not
4740 	 * clear which of the capability flags could tell us what to expect.
4741 	 * For now, keep things simple and just leave the MIC in place if
4742 	 * it is present.
4743 	 *
4744 	 * The IV will be stripped by ieee80211_inputm().
4745 	 */
4746 	return 0;
4747 }
4748 
4749 int
4750 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4751     struct ieee80211_rxinfo *rxi)
4752 {
4753 	struct ieee80211com *ic = &sc->sc_ic;
4754 	struct ifnet *ifp = IC2IFP(ic);
4755 	struct ieee80211_frame *wh;
4756 	struct ieee80211_node *ni;
4757 	int ret = 0;
4758 	uint8_t type, subtype;
4759 
4760 	wh = mtod(m, struct ieee80211_frame *);
4761 
4762 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4763 	if (type == IEEE80211_FC0_TYPE_CTL)
4764 		return 0;
4765 
4766 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4767 	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4768 		return 0;
4769 
4770 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4771 	    !(wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4772 		return 0;
4773 
4774 	ni = ieee80211_find_rxnode(ic, wh);
4775 	/* Handle hardware decryption. */
4776 	if ((ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4777 	    ni->ni_pairwise_key.k_cipher == IEEE80211_CIPHER_CCMP) {
4778 		if ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4779 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4780 			ic->ic_stats.is_ccmp_dec_errs++;
4781 			ret = 1;
4782 			goto out;
4783 		}
4784 		/* Check whether decryption was successful or not. */
4785 		if ((rx_pkt_status &
4786 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4787 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) !=
4788 		    (IWM_RX_MPDU_RES_STATUS_DEC_DONE |
4789 		    IWM_RX_MPDU_RES_STATUS_MIC_OK)) {
4790 			ic->ic_stats.is_ccmp_dec_errs++;
4791 			ret = 1;
4792 			goto out;
4793 		}
4794 		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4795 	}
4796 out:
4797 	if (ret)
4798 		ifp->if_ierrors++;
4799 	ieee80211_release_node(ic, ni);
4800 	return ret;
4801 }
4802 
4803 void
4804 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4805     uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4806     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4807     struct mbuf_list *ml)
4808 {
4809 	struct ieee80211com *ic = &sc->sc_ic;
4810 	struct ifnet *ifp = IC2IFP(ic);
4811 	struct ieee80211_frame *wh;
4812 	struct ieee80211_node *ni;
4813 
4814 	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4815 		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4816 
4817 	wh = mtod(m, struct ieee80211_frame *);
4818 	ni = ieee80211_find_rxnode(ic, wh);
4819 	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4820 	    iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4821 		ifp->if_ierrors++;
4822 		m_freem(m);
4823 		ieee80211_release_node(ic, ni);
4824 		return;
4825 	}
4826 
4827 #if NBPFILTER > 0
4828 	if (sc->sc_drvbpf != NULL) {
4829 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4830 		uint16_t chan_flags;
4831 
4832 		tap->wr_flags = 0;
4833 		if (is_shortpre)
4834 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4835 		tap->wr_chan_freq =
4836 		    htole16(ic->ic_channels[chanidx].ic_freq);
4837 		chan_flags = ic->ic_channels[chanidx].ic_flags;
4838 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4839 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4840 			chan_flags &= ~IEEE80211_CHAN_HT;
4841 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4842 		}
4843 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4844 			chan_flags &= ~IEEE80211_CHAN_VHT;
4845 		tap->wr_chan_flags = htole16(chan_flags);
4846 		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4847 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4848 		tap->wr_tsft = device_timestamp;
4849 		if (rate_n_flags & IWM_RATE_MCS_HT_MSK) {
4850 			uint8_t mcs = (rate_n_flags &
4851 			    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
4852 			    IWM_RATE_HT_MCS_NSS_MSK));
4853 			tap->wr_rate = (0x80 | mcs);
4854 		} else {
4855 			uint8_t rate = (rate_n_flags &
4856 			    IWM_RATE_LEGACY_RATE_MSK);
4857 			switch (rate) {
4858 			/* CCK rates. */
4859 			case  10: tap->wr_rate =   2; break;
4860 			case  20: tap->wr_rate =   4; break;
4861 			case  55: tap->wr_rate =  11; break;
4862 			case 110: tap->wr_rate =  22; break;
4863 			/* OFDM rates. */
4864 			case 0xd: tap->wr_rate =  12; break;
4865 			case 0xf: tap->wr_rate =  18; break;
4866 			case 0x5: tap->wr_rate =  24; break;
4867 			case 0x7: tap->wr_rate =  36; break;
4868 			case 0x9: tap->wr_rate =  48; break;
4869 			case 0xb: tap->wr_rate =  72; break;
4870 			case 0x1: tap->wr_rate =  96; break;
4871 			case 0x3: tap->wr_rate = 108; break;
4872 			/* Unknown rate: should not happen. */
4873 			default:  tap->wr_rate =   0;
4874 			}
4875 		}
4876 
4877 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4878 		    m, BPF_DIRECTION_IN);
4879 	}
4880 #endif
4881 	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4882 	ieee80211_release_node(ic, ni);
4883 }
4884 
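/*
 * Legacy (non-MQ) RX path.  The PHY info for this MPDU arrived earlier
 * in a separate IWM_RX_PHY_CMD notification and was saved in
 * sc_last_phy_info by iwm_rx_rx_phy_cmd().
 */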
4885 void
4886 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4887     size_t maxlen, struct mbuf_list *ml)
4888 {
4889 	struct ieee80211com *ic = &sc->sc_ic;
4890 	struct ieee80211_rxinfo rxi;
4891 	struct iwm_rx_phy_info *phy_info;
4892 	struct iwm_rx_mpdu_res_start *rx_res;
4893 	int device_timestamp;
4894 	uint16_t phy_flags;
4895 	uint32_t len;
4896 	uint32_t rx_pkt_status;
4897 	int rssi, chanidx, rate_n_flags;
4898 
4899 	memset(&rxi, 0, sizeof(rxi));
4900 
4901 	phy_info = &sc->sc_last_phy_info;
4902 	rx_res = (struct iwm_rx_mpdu_res_start *)pktdata;
4903 	len = le16toh(rx_res->byte_count);
4904 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4905 		/* Allow control frames in monitor mode. */
4906 		if (len < sizeof(struct ieee80211_frame_cts)) {
4907 			ic->ic_stats.is_rx_tooshort++;
4908 			IC2IFP(ic)->if_ierrors++;
4909 			m_freem(m);
4910 			return;
4911 		}
4912 	} else if (len < sizeof(struct ieee80211_frame)) {
4913 		ic->ic_stats.is_rx_tooshort++;
4914 		IC2IFP(ic)->if_ierrors++;
4915 		m_freem(m);
4916 		return;
4917 	}
4918 	if (len > maxlen - sizeof(*rx_res)) {
4919 		IC2IFP(ic)->if_ierrors++;
4920 		m_freem(m);
4921 		return;
4922 	}
4923 
4924 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
4925 		m_freem(m);
4926 		return;
4927 	}
4928 
4929 	rx_pkt_status = le32toh(*(uint32_t *)(pktdata + sizeof(*rx_res) + len));
4930 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4931 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4932 		m_freem(m);
4933 		return; /* drop */
4934 	}
4935 
4936 	m->m_data = pktdata + sizeof(*rx_res);
4937 	m->m_pkthdr.len = m->m_len = len;
4938 
4939 	if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4940 		m_freem(m);
4941 		return;
4942 	}
4943 
4944 	chanidx = letoh32(phy_info->channel);
4945 	device_timestamp = le32toh(phy_info->system_timestamp);
4946 	phy_flags = letoh16(phy_info->phy_flags);
4947 	rate_n_flags = le32toh(phy_info->rate_n_flags);
4948 
4949 	rssi = iwm_get_signal_strength(sc, phy_info);
4950 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
4951 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
4952 
4953 	rxi.rxi_rssi = rssi;
4954 	rxi.rxi_tstamp = device_timestamp;
4955 	rxi.rxi_chan = chanidx;
4956 
4957 	iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4958 	    (phy_flags & IWM_PHY_INFO_FLAG_SHPREAMBLE),
4959 	    rate_n_flags, device_timestamp, &rxi, ml);
4960 }
4961 
4962 void
4963 iwm_flip_address(uint8_t *addr)
4964 {
4965 	int i;
4966 	uint8_t mac_addr[ETHER_ADDR_LEN];
4967 
4968 	for (i = 0; i < ETHER_ADDR_LEN; i++)
4969 		mac_addr[i] = addr[ETHER_ADDR_LEN - i - 1];
4970 	IEEE80211_ADDR_COPY(addr, mac_addr);
4971 }
4972 
4973 /*
4974  * Drop duplicate 802.11 retransmissions
4975  * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4976  * and handle pseudo-duplicate frames which result from deaggregation
4977  * of A-MSDU frames in hardware.
4978  */
4979 int
4980 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4981     struct iwm_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4982 {
4983 	struct ieee80211com *ic = &sc->sc_ic;
4984 	struct iwm_node *in = (void *)ic->ic_bss;
4985 	struct iwm_rxq_dup_data *dup_data = &in->dup_data;
4986 	uint8_t tid = IWM_MAX_TID_COUNT, subframe_idx;
4987 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4988 	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4989 	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4990 	int hasqos = ieee80211_has_qos(wh);
4991 	uint16_t seq;
4992 
4993 	if (type == IEEE80211_FC0_TYPE_CTL ||
4994 	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4995 	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4996 		return 0;
4997 
4998 	if (hasqos) {
4999 		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
5000 		if (tid > IWM_MAX_TID_COUNT)
5001 			tid = IWM_MAX_TID_COUNT;
5002 	}
5003 
5004 	/* If this wasn't part of an A-MSDU, the sub-frame index will be 0. */
5005 	subframe_idx = desc->amsdu_info &
5006 		IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
5007 
5008 	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
5009 	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
5010 	    dup_data->last_seq[tid] == seq &&
5011 	    dup_data->last_sub_frame[tid] >= subframe_idx)
5012 		return 1;
5013 
5014 	/*
5015 	 * Allow the same frame sequence number for all A-MSDU subframes
5016 	 * following the first subframe.
5017 	 * Otherwise these subframes would be discarded as replays.
5018 	 */
5019 	if (dup_data->last_seq[tid] == seq &&
5020 	    subframe_idx > dup_data->last_sub_frame[tid] &&
5021 	    (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU)) {
5022 		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5023 	}
5024 
5025 	dup_data->last_seq[tid] = seq;
5026 	dup_data->last_sub_frame[tid] = subframe_idx;
5027 
5028 	return 0;
5029 }
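/*
 * Example: a QoS data frame arrives with the retry bit set, the same
 * sequence number as the previous frame on its TID, and a sub-frame index
 * which has not advanced. iwm_detect_duplicate() returns 1 and the caller
 * drops the frame. A later subframe of the same A-MSDU (same SN, higher
 * sub-frame index) is instead tagged IEEE80211_RXI_SAME_SEQ and accepted.
 */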
5030 
5031 /*
5032  * Returns true if sn2 - buffer_size < sn1 < sn2.
5033  * To be used only in order to compare reorder buffer head with NSSN.
5034  * We fully trust NSSN unless it is behind us due to reorder timeout.
5035  * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
5036  */
5037 int
5038 iwm_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
5039 {
5040 	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
5041 }
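/*
 * For example, with sn2 = 100 and buffer_size = 64, sequence numbers such
 * as 50 or 99 compare as "less", while 30, 100 or 101 do not; all
 * arithmetic is modulo 4096 since 802.11 sequence numbers are 12 bits.
 */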
5042 
5043 void
5044 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
5045     struct iwm_rxba_data *rxba, struct iwm_reorder_buffer *reorder_buf,
5046     uint16_t nssn, struct mbuf_list *ml)
5047 {
5048 	struct iwm_reorder_buf_entry *entries = &rxba->entries[0];
5049 	uint16_t ssn = reorder_buf->head_sn;
5050 
5051 	/* ignore nssn smaller than head sn - this can happen due to timeout */
5052 	if (iwm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
5053 		goto set_timer;
5054 
5055 	while (iwm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
5056 		int index = ssn % reorder_buf->buf_size;
5057 		struct mbuf *m;
5058 		int chanidx, is_shortpre;
5059 		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
5060 		struct ieee80211_rxinfo *rxi;
5061 
5062 		/* This data is the same for all A-MSDU subframes. */
5063 		chanidx = entries[index].chanidx;
5064 		rx_pkt_status = entries[index].rx_pkt_status;
5065 		is_shortpre = entries[index].is_shortpre;
5066 		rate_n_flags = entries[index].rate_n_flags;
5067 		device_timestamp = entries[index].device_timestamp;
5068 		rxi = &entries[index].rxi;
5069 
5070 		/*
5071 		 * Empty the list. It will hold more than one frame for an
5072 		 * A-MSDU. An empty list is valid as well, since the nssn
5073 		 * indicates that frames were received.
5074 		 */
5075 		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
5076 			iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
5077 			    rate_n_flags, device_timestamp, rxi, ml);
5078 			reorder_buf->num_stored--;
5079 
5080 			/*
5081 			 * Allow the same frame sequence number and CCMP PN for
5082 			 * all A-MSDU subframes following the first subframe.
5083 			 * Otherwise they would be discarded as replays.
5084 			 */
5085 			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
5086 			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5087 		}
5088 
5089 		ssn = (ssn + 1) & 0xfff;
5090 	}
5091 	reorder_buf->head_sn = nssn;
5092 
5093 set_timer:
5094 	if (reorder_buf->num_stored && !reorder_buf->removed) {
5095 		timeout_add_usec(&reorder_buf->reorder_timer,
5096 		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
5097 	} else
5098 		timeout_del(&reorder_buf->reorder_timer);
5099 }
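/*
 * Example: with head_sn = 5 and nssn = 8, the loop above flushes the
 * buffer slots for SNs 5, 6 and 7 (a slot may hold several A-MSDU
 * subframes), sets head_sn = 8, and then re-arms the reorder timer if
 * any out-of-order frames remain buffered.
 */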
5100 
5101 int
5102 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5103     struct iwm_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
5104 {
5105 	struct ieee80211com *ic = &sc->sc_ic;
5106 
5107 	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
5108 		/* we have a new (A-)MPDU ... */
5109 
5110 		/*
5111 		 * reset counter to 0 if we didn't have any oldsn in
5112 		 * the last A-MPDU (as detected by GP2 being identical)
5113 		 */
5114 		if (!buffer->consec_oldsn_prev_drop)
5115 			buffer->consec_oldsn_drops = 0;
5116 
5117 		/* either way, update our tracking state */
5118 		buffer->consec_oldsn_ampdu_gp2 = gp2;
5119 	} else if (buffer->consec_oldsn_prev_drop) {
5120 		/*
5121 		 * tracking state didn't change, and we had an old SN
5122 		 * indication before - do nothing in this case, we
5123 		 * already noted this one down and are waiting for the
5124 		 * next A-MPDU (by GP2)
5125 		 */
5126 		return 0;
5127 	}
5128 
5129 	/* return unless this MPDU has old SN */
5130 	if (!(reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN))
5131 		return 0;
5132 
5133 	/* update state */
5134 	buffer->consec_oldsn_prev_drop = 1;
5135 	buffer->consec_oldsn_drops++;
5136 
5137 	/* if limit is reached, send del BA and reset state */
5138 	if (buffer->consec_oldsn_drops == IWM_AMPDU_CONSEC_DROPS_DELBA) {
5139 		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
5140 		    0, tid);
5141 		buffer->consec_oldsn_prev_drop = 0;
5142 		buffer->consec_oldsn_drops = 0;
5143 		return 1;
5144 	}
5145 
5146 	return 0;
5147 }
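/*
 * In short: at most one old-SN drop is counted per A-MPDU (A-MPDUs are
 * told apart by their GP2 timestamp), and once IWM_AMPDU_CONSEC_DROPS_DELBA
 * consecutive A-MPDUs have been dropped this way the BA session is torn
 * down via ieee80211_delba_request() so that both sides can start over.
 */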
5148 
5149 /*
5150  * Handle re-ordering of frames which were de-aggregated in hardware.
5151  * Returns 1 if the MPDU was consumed (buffered or dropped).
5152  * Returns 0 if the MPDU should be passed to upper layer.
5153  */
5154 int
5155 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5156     struct iwm_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
5157     uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
5158     struct mbuf_list *ml)
5159 {
5160 	struct ieee80211com *ic = &sc->sc_ic;
5161 	struct ieee80211_frame *wh;
5162 	struct ieee80211_node *ni;
5163 	struct iwm_rxba_data *rxba;
5164 	struct iwm_reorder_buffer *buffer;
5165 	uint32_t reorder_data = le32toh(desc->reorder_data);
5166 	int is_amsdu = (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU);
5167 	int last_subframe =
5168 		(desc->amsdu_info & IWM_RX_MPDU_AMSDU_LAST_SUBFRAME);
5169 	uint8_t tid;
5170 	uint8_t subframe_idx = (desc->amsdu_info &
5171 	    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5172 	struct iwm_reorder_buf_entry *entries;
5173 	int index;
5174 	uint16_t nssn, sn;
5175 	uint8_t baid, type, subtype;
5176 	int hasqos;
5177 
5178 	wh = mtod(m, struct ieee80211_frame *);
5179 	hasqos = ieee80211_has_qos(wh);
5180 	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
5181 
5182 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5183 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
5184 
5185 	/*
5186 	 * We are only interested in Block Ack requests and unicast QoS data.
5187 	 */
5188 	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
5189 		return 0;
5190 	if (hasqos) {
5191 		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
5192 			return 0;
5193 	} else {
5194 		if (type != IEEE80211_FC0_TYPE_CTL ||
5195 		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
5196 			return 0;
5197 	}
5198 
5199 	baid = (reorder_data & IWM_RX_MPDU_REORDER_BAID_MASK) >>
5200 		IWM_RX_MPDU_REORDER_BAID_SHIFT;
5201 	if (baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5202 	    baid >= nitems(sc->sc_rxba_data))
5203 		return 0;
5204 
5205 	rxba = &sc->sc_rxba_data[baid];
5206 	if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID ||
5207 	    tid != rxba->tid || rxba->sta_id != IWM_STATION_ID)
5208 		return 0;
5209 
5210 	if (rxba->timeout != 0)
5211 		getmicrouptime(&rxba->last_rx);
5212 
5213 	/* Bypass A-MPDU re-ordering in net80211. */
5214 	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5215 
5216 	nssn = reorder_data & IWM_RX_MPDU_REORDER_NSSN_MASK;
5217 	sn = (reorder_data & IWM_RX_MPDU_REORDER_SN_MASK) >>
5218 		IWM_RX_MPDU_REORDER_SN_SHIFT;
5219 
5220 	buffer = &rxba->reorder_buf;
5221 	entries = &rxba->entries[0];
5222 
5223 	if (!buffer->valid) {
5224 		if (reorder_data & IWM_RX_MPDU_REORDER_BA_OLD_SN)
5225 			return 0;
5226 		buffer->valid = 1;
5227 	}
5228 
5229 	ni = ieee80211_find_rxnode(ic, wh);
5230 	if (type == IEEE80211_FC0_TYPE_CTL &&
5231 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5232 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5233 		goto drop;
5234 	}
5235 
5236 	/*
5237 	 * If there was a significant jump in the nssn - adjust.
5238 	 * If the SN is smaller than the NSSN it might need to first go into
5239 	 * the reorder buffer, in which case we just release up to it and the
5240 	 * rest of the function will take care of storing it and releasing up to
5241 	 * the nssn.
5242 	 */
5243 	if (!iwm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5244 	    buffer->buf_size) ||
5245 	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5246 		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5247 		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5248 		iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5249 	}
5250 
5251 	if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5252 	    device_timestamp)) {
5253 		 /* BA session will be torn down. */
5254 		ic->ic_stats.is_ht_rx_ba_window_jump++;
5255 		goto drop;
5256 
5257 	}
5258 
5259 	/* drop any outdated packets */
5260 	if (SEQ_LT(sn, buffer->head_sn)) {
5261 		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5262 		goto drop;
5263 	}
5264 
5265 	/* release immediately if allowed by nssn and no stored frames */
5266 	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5267 		if (iwm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5268 		   (!is_amsdu || last_subframe))
5269 			buffer->head_sn = nssn;
5270 		ieee80211_release_node(ic, ni);
5271 		return 0;
5272 	}
5273 
5274 	/*
5275 	 * release immediately if there are no stored frames, and the sn is
5276 	 * equal to the head.
5277 	 * This can happen due to reorder timer, where NSSN is behind head_sn.
5278 	 * When we released everything, and we got the next frame in the
5279 	 * sequence, according to the NSSN we can't release immediately,
5280 	 * while technically there is no hole and we can move forward.
5281 	 */
5282 	if (!buffer->num_stored && sn == buffer->head_sn) {
5283 		if (!is_amsdu || last_subframe)
5284 			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5285 		ieee80211_release_node(ic, ni);
5286 		return 0;
5287 	}
5288 
5289 	index = sn % buffer->buf_size;
5290 
5291 	/*
5292 	 * Check if we have already stored this frame.
5293 	 * As an A-MSDU is either received as a whole or not at all, the logic
5294 	 * is simple: If frames are already stored at this buffer position and
5295 	 * the last stored A-MSDU had a different SN, it is a retransmission.
5296 	 * If the SN is the same, an incrementing subframe index means it is
5297 	 * part of the same A-MSDU; otherwise it is a retransmission.
5298 	 */
5299 	if (!ml_empty(&entries[index].frames)) {
5300 		if (!is_amsdu) {
5301 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5302 			goto drop;
5303 		} else if (sn != buffer->last_amsdu ||
5304 		    buffer->last_sub_index >= subframe_idx) {
5305 			ic->ic_stats.is_ht_rx_ba_no_buf++;
5306 			goto drop;
5307 		}
5308 	} else {
5309 		/* This data is the same for all A-MSDU subframes. */
5310 		entries[index].chanidx = chanidx;
5311 		entries[index].is_shortpre = is_shortpre;
5312 		entries[index].rate_n_flags = rate_n_flags;
5313 		entries[index].device_timestamp = device_timestamp;
5314 		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5315 	}
5316 
5317 	/* put in reorder buffer */
5318 	ml_enqueue(&entries[index].frames, m);
5319 	buffer->num_stored++;
5320 	getmicrouptime(&entries[index].reorder_time);
5321 
5322 	if (is_amsdu) {
5323 		buffer->last_amsdu = sn;
5324 		buffer->last_sub_index = subframe_idx;
5325 	}
5326 
5327 	/*
5328 	 * We cannot trust the NSSN for A-MSDU sub-frames that are not the
5329 	 * last. The NSSN advances on the first sub-frame and may cause the
5330 	 * reorder buffer to advance before all sub-frames have arrived.
5331 	 * Example: The reorder buffer contains SNs 0 and 2, and we receive
5332 	 * an A-MSDU with SN 1. The NSSN for the first sub-frame will be 3,
5333 	 * so the driver releases SNs 0, 1, and 2. When a later sub-frame of
5334 	 * SN 1 arrives, the reorder buffer is already ahead and drops it.
5335 	 * If the last sub-frame is not on this queue, we will get a frame
5336 	 * release notification with an up-to-date NSSN.
5337 	 */
5338 	if (!is_amsdu || last_subframe)
5339 		iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5340 
5341 	ieee80211_release_node(ic, ni);
5342 	return 1;
5343 
5344 drop:
5345 	m_freem(m);
5346 	ieee80211_release_node(ic, ni);
5347 	return 1;
5348 }
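/*
 * Summary of the cases above: frames which do not belong to an active BA
 * agreement pass through unchanged (return 0); in-order frames with an
 * empty reorder buffer pass through once head_sn has been advanced;
 * out-of-order frames within the BA window are buffered (return 1) until
 * the NSSN, a BAR, or the reorder timeout releases them; frames behind
 * the window are dropped.
 */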
5349 
5350 void
5351 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5352     size_t maxlen, struct mbuf_list *ml)
5353 {
5354 	struct ieee80211com *ic = &sc->sc_ic;
5355 	struct ieee80211_rxinfo rxi;
5356 	struct iwm_rx_mpdu_desc *desc;
5357 	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5358 	int rssi;
5359 	uint8_t chanidx;
5360 	uint16_t phy_info;
5361 
5362 	memset(&rxi, 0, sizeof(rxi));
5363 
5364 	desc = (struct iwm_rx_mpdu_desc *)pktdata;
5365 
5366 	if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
5367 	    !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5368 		m_freem(m);
5369 		return; /* drop */
5370 	}
5371 
5372 	len = le16toh(desc->mpdu_len);
5373 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5374 		/* Allow control frames in monitor mode. */
5375 		if (len < sizeof(struct ieee80211_frame_cts)) {
5376 			ic->ic_stats.is_rx_tooshort++;
5377 			IC2IFP(ic)->if_ierrors++;
5378 			m_freem(m);
5379 			return;
5380 		}
5381 	} else if (len < sizeof(struct ieee80211_frame)) {
5382 		ic->ic_stats.is_rx_tooshort++;
5383 		IC2IFP(ic)->if_ierrors++;
5384 		m_freem(m);
5385 		return;
5386 	}
5387 	if (len > maxlen - sizeof(*desc)) {
5388 		IC2IFP(ic)->if_ierrors++;
5389 		m_freem(m);
5390 		return;
5391 	}
5392 
5393 	m->m_data = pktdata + sizeof(*desc);
5394 	m->m_pkthdr.len = m->m_len = len;
5395 
5396 	/* Account for padding following the frame header. */
5397 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD) {
5398 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5399 		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5400 		if (type == IEEE80211_FC0_TYPE_CTL) {
5401 			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5402 			case IEEE80211_FC0_SUBTYPE_CTS:
5403 				hdrlen = sizeof(struct ieee80211_frame_cts);
5404 				break;
5405 			case IEEE80211_FC0_SUBTYPE_ACK:
5406 				hdrlen = sizeof(struct ieee80211_frame_ack);
5407 				break;
5408 			default:
5409 				hdrlen = sizeof(struct ieee80211_frame_min);
5410 				break;
5411 			}
5412 		} else
5413 			hdrlen = ieee80211_get_hdrlen(wh);
5414 
5415 		if ((le16toh(desc->status) &
5416 		    IWM_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5417 		    IWM_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5418 			/* Padding is inserted after the IV. */
5419 			hdrlen += IEEE80211_CCMP_HDRLEN;
5420 		}
5421 
5422 		memmove(m->m_data + 2, m->m_data, hdrlen);
5423 		m_adj(m, 2);
5424 	}
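	/*
	 * The buffer handled above looks like this before the fixup:
	 * [ MAC header | 2 pad bytes | payload ]. The memmove() shifts
	 * the header forward over the padding and m_adj() trims the now
	 * stale first two bytes, leaving the header contiguous with the
	 * payload. For CCMP the IV counts as part of the header here,
	 * since the hardware inserts the padding after it.
	 */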
5425 
5426 	/*
5427 	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5428 	 * in place for each subframe. But it leaves the 'A-MSDU present'
5429 	 * bit set in the frame header. We need to clear this bit ourselves.
5430 	 *
5431 	 * And we must allow the same CCMP PN for subframes following the
5432 	 * first subframe. Otherwise they would be discarded as replays.
5433 	 */
5434 	if (desc->mac_flags2 & IWM_RX_MPDU_MFLG2_AMSDU) {
5435 		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5436 		uint8_t subframe_idx = (desc->amsdu_info &
5437 		    IWM_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5438 		if (subframe_idx > 0)
5439 			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5440 		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5441 		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5442 			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5443 			    struct ieee80211_qosframe_addr4 *);
5444 			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5445 
5446 			/* HW reverses addr3 and addr4. */
5447 			iwm_flip_address(qwh4->i_addr3);
5448 			iwm_flip_address(qwh4->i_addr4);
5449 		} else if (ieee80211_has_qos(wh) &&
5450 		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5451 			struct ieee80211_qosframe *qwh = mtod(m,
5452 			    struct ieee80211_qosframe *);
5453 			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5454 
5455 			/* HW reverses addr3. */
5456 			iwm_flip_address(qwh->i_addr3);
5457 		}
5458 	}
5459 
5460 	/*
5461 	 * Verify decryption before duplicate detection. The latter uses
5462 	 * the TID supplied in QoS frame headers and this TID is implicitly
5463 	 * verified as part of the CCMP nonce.
5464 	 */
5465 	if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5466 		m_freem(m);
5467 		return;
5468 	}
5469 
5470 	if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5471 		m_freem(m);
5472 		return;
5473 	}
5474 
5475 	phy_info = le16toh(desc->phy_info);
5476 	rate_n_flags = le32toh(desc->v1.rate_n_flags);
5477 	chanidx = desc->v1.channel;
5478 	device_timestamp = desc->v1.gp2_on_air_rise;
5479 
5480 	rssi = iwm_rxmq_get_signal_strength(sc, desc);
5481 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
5482 	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5483 
5484 	rxi.rxi_rssi = rssi;
5485 	rxi.rxi_tstamp = le64toh(desc->v1.tsf_on_air_rise);
5486 	rxi.rxi_chan = chanidx;
5487 
5488 	if (iwm_rx_reorder(sc, m, chanidx, desc,
5489 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5490 	    rate_n_flags, device_timestamp, &rxi, ml))
5491 		return;
5492 
5493 	iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5494 	    (phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE),
5495 	    rate_n_flags, device_timestamp, &rxi, ml);
5496 }
5497 
5498 void
5499 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5500 {
5501 	struct ieee80211com *ic = &sc->sc_ic;
5502 	struct iwm_node *in = (void *)ni;
5503 	int old_txmcs = ni->ni_txmcs;
5504 	int old_nss = ni->ni_vht_ss;
5505 
5506 	if (ni->ni_flags & IEEE80211_NODE_VHT)
5507 		ieee80211_ra_vht_choose(&in->in_rn_vht, ic, ni);
5508 	else
5509 		ieee80211_ra_choose(&in->in_rn, ic, ni);
5510 
5511 	/*
5512 	 * If RA has chosen a new TX rate we must update
5513 	 * the firmware's LQ rate table.
5514 	 */
5515 	if (ni->ni_txmcs != old_txmcs || ni->ni_vht_ss != old_nss)
5516 		iwm_setrates(in, 1);
5517 }
5518 
5519 void
5520 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5521     int txmcs, uint8_t failure_frame, int txfail)
5522 {
5523 	struct ieee80211com *ic = &sc->sc_ic;
5524 	struct iwm_node *in = (void *)ni;
5525 
5526 	/* Ignore Tx reports which don't match our last LQ command. */
5527 	if (txmcs != ni->ni_txmcs) {
5528 		if (++in->lq_rate_mismatch > 15) {
5529 			/* Try to sync firmware with the driver... */
5530 			iwm_setrates(in, 1);
5531 			in->lq_rate_mismatch = 0;
5532 		}
5533 	} else {
5534 		int mcs = txmcs;
5535 		const struct ieee80211_ht_rateset *rs =
5536 		    ieee80211_ra_get_ht_rateset(txmcs,
5537 		        ieee80211_node_supports_ht_chan40(ni),
5538 			ieee80211_ra_use_ht_sgi(ni));
5539 		unsigned int retries = 0, i;
5540 
5541 		in->lq_rate_mismatch = 0;
5542 
5543 		for (i = 0; i < failure_frame; i++) {
5544 			if (mcs > rs->min_mcs) {
5545 				ieee80211_ra_add_stats_ht(&in->in_rn,
5546 				    ic, ni, mcs, 1, 1);
5547 				mcs--;
5548 			} else
5549 				retries++;
5550 		}
5551 
5552 		if (txfail && failure_frame == 0) {
5553 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5554 			    txmcs, 1, 1);
5555 		} else {
5556 			ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5557 			    mcs, retries + 1, retries);
5558 		}
5559 
5560 		iwm_ra_choose(sc, ni);
5561 	}
5562 }
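/*
 * Example of the accounting above, assuming the LQ rate table installed
 * by iwm_setrates() steps down one MCS per retry (which is what this
 * code models): a report for MCS 7 with failure_frame = 2 and a
 * successful final attempt charges one failed attempt each to MCS 7
 * and MCS 6 and credits the success to MCS 5. Attempts which would step
 * below the rateset's min_mcs are counted as extra retries at min_mcs.
 */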
5563 
5564 void
5565 iwm_vht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5566     int txmcs, int nss, uint8_t failure_frame, int txfail)
5567 {
5568 	struct ieee80211com *ic = &sc->sc_ic;
5569 	struct iwm_node *in = (void *)ni;
5570 	uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
5571 	uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
5572 
5573 	/* Ignore Tx reports which don't match our last LQ command. */
5574 	if (txmcs != ni->ni_txmcs || nss != ni->ni_vht_ss) {
5575 		if (++in->lq_rate_mismatch > 15) {
5576 			/* Try to sync firmware with the driver... */
5577 			iwm_setrates(in, 1);
5578 			in->lq_rate_mismatch = 0;
5579 		}
5580 	} else {
5581 		int mcs = txmcs;
5582 		unsigned int retries = 0, i;
5583 
5584 		if (in->in_phyctxt) {
5585 			vht_chan_width = in->in_phyctxt->vht_chan_width;
5586 			sco = in->in_phyctxt->sco;
5587 		}
5588 		in->lq_rate_mismatch = 0;
5589 
5590 		for (i = 0; i < failure_frame; i++) {
5591 			if (mcs > 0) {
5592 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5593 				    ic, ni, mcs, nss, 1, 1);
5594 				if (vht_chan_width >=
5595 				    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5596 					/*
5597 					 * First 4 Tx attempts used same MCS,
5598 					 * twice at 80MHz and twice at 40MHz.
5599 					 */
5600 					if (i >= 4)
5601 						mcs--;
5602 				} else if (sco == IEEE80211_HTOP0_SCO_SCA ||
5603 				    sco == IEEE80211_HTOP0_SCO_SCB) {
5604 					/*
5605 					 * First 4 Tx attempts used same MCS,
5606 					 * four times at 40MHz.
5607 					 */
5608 					if (i >= 4)
5609 						mcs--;
5610 				} else
5611 					mcs--;
5612 			} else
5613 				retries++;
5614 		}
5615 
5616 		if (txfail && failure_frame == 0) {
5617 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5618 			    txmcs, nss, 1, 1);
5619 		} else {
5620 			ieee80211_ra_vht_add_stats(&in->in_rn_vht, ic, ni,
5621 			    mcs, nss, retries + 1, retries);
5622 		}
5623 
5624 		iwm_ra_choose(sc, ni);
5625 	}
5626 }
5627 
5628 void
5629 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5630     struct iwm_node *in, int txmcs, int txrate)
5631 {
5632 	struct ieee80211com *ic = &sc->sc_ic;
5633 	struct ieee80211_node *ni = &in->in_ni;
5634 	struct ifnet *ifp = IC2IFP(ic);
5635 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5636 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5637 	uint32_t initial_rate = le32toh(tx_resp->initial_rate);
5638 	int txfail;
5639 
5640 	KASSERT(tx_resp->frame_count == 1);
5641 
5642 	txfail = (status != IWM_TX_STATUS_SUCCESS &&
5643 	    status != IWM_TX_STATUS_DIRECT_DONE);
5644 
5645 	/*
5646 	 * Update rate control statistics.
5647 	 * Only report frames which were actually queued with the currently
5648 	 * selected Tx rate. Because Tx queues are relatively long we may
5649 	 * encounter previously selected rates here during Tx bursts.
5650 	 * Providing feedback based on such frames can lead to suboptimal
5651 	 * Tx rate control decisions.
5652 	 */
5653 	if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
5654 		if (txrate != ni->ni_txrate) {
5655 			if (++in->lq_rate_mismatch > 15) {
5656 				/* Try to sync firmware with the driver... */
5657 				iwm_setrates(in, 1);
5658 				in->lq_rate_mismatch = 0;
5659 			}
5660 		} else {
5661 			in->lq_rate_mismatch = 0;
5662 
5663 			in->in_amn.amn_txcnt++;
5664 			if (txfail)
5665 				in->in_amn.amn_retrycnt++;
5666 			if (tx_resp->failure_frame > 0)
5667 				in->in_amn.amn_retrycnt++;
5668 		}
5669 	} else if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5670 	    ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5671 	    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5672 		int txmcs = initial_rate & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5673 		int nss = ((initial_rate & IWM_RATE_VHT_MCS_NSS_MSK) >>
5674 		    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5675 		iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5676 		    tx_resp->failure_frame, txfail);
5677 	} else if (ic->ic_fixed_mcs == -1 && ic->ic_state == IEEE80211_S_RUN &&
5678 	    (initial_rate & IWM_RATE_MCS_HT_MSK)) {
5679 		int txmcs = initial_rate &
5680 		    (IWM_RATE_HT_MCS_RATE_CODE_MSK | IWM_RATE_HT_MCS_NSS_MSK);
5681 		iwm_ht_single_rate_control(sc, ni, txmcs,
5682 		    tx_resp->failure_frame, txfail);
5683 	}
5684 
5685 	if (txfail)
5686 		ifp->if_oerrors++;
5687 }
5688 
5689 void
5690 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5691 {
5692 	struct ieee80211com *ic = &sc->sc_ic;
5693 
5694 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5695 	    BUS_DMASYNC_POSTWRITE);
5696 	bus_dmamap_unload(sc->sc_dmat, txd->map);
5697 	m_freem(txd->m);
5698 	txd->m = NULL;
5699 
5700 	KASSERT(txd->in);
5701 	ieee80211_release_node(ic, &txd->in->in_ni);
5702 	txd->in = NULL;
5703 	txd->ampdu_nframes = 0;
5704 	txd->ampdu_txmcs = 0;
5705 	txd->ampdu_txnss = 0;
5706 }
5707 
5708 void
5709 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5710 {
5711 	struct iwm_tx_data *txd;
5712 
5713 	while (ring->tail != idx) {
5714 		txd = &ring->data[ring->tail];
5715 		if (txd->m != NULL) {
5716 			iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5717 			iwm_txd_done(sc, txd);
5718 			ring->queued--;
5719 		}
5720 		ring->tail = (ring->tail + 1) % IWM_TX_RING_COUNT;
5721 	}
5722 
5723 	wakeup(ring);
5724 }
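/*
 * IWM_AGG_SSN_TO_TXQ_IDX() maps a 12-bit sequence number onto a Tx ring
 * index, so a caller passing IWM_AGG_SSN_TO_TXQ_IDX(ssn) completes and
 * frees every queued frame strictly before the firmware's BA window
 * start 'ssn'.
 */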
5725 
5726 void
5727 iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5728     struct iwm_node *in, struct iwm_tx_ring *txq, uint32_t initial_rate,
5729     uint8_t nframes, uint8_t failure_frame, uint16_t ssn, int status,
5730     struct iwm_agg_tx_status *agg_status)
5731 {
5732 	struct ieee80211com *ic = &sc->sc_ic;
5733 	int tid = cmd_hdr->qid - IWM_FIRST_AGG_TX_QUEUE;
5734 	struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5735 	struct ieee80211_node *ni = &in->in_ni;
5736 	struct ieee80211_tx_ba *ba;
5737 	int txfail = (status != IWM_TX_STATUS_SUCCESS &&
5738 	    status != IWM_TX_STATUS_DIRECT_DONE);
5739 	uint16_t seq;
5740 
5741 	if (ic->ic_state != IEEE80211_S_RUN)
5742 		return;
5743 
5744 	if (nframes > 1) {
5745 		int i;
5746 		/*
5747 		 * Collect information about this A-MPDU.
5748 		 */
5749 
5750 		for (i = 0; i < nframes; i++) {
5751 			uint8_t qid = agg_status[i].qid;
5752 			uint8_t idx = agg_status[i].idx;
5753 			uint16_t txstatus = (le16toh(agg_status[i].status) &
5754 			    IWM_AGG_TX_STATE_STATUS_MSK);
5755 
5756 			if (txstatus != IWM_AGG_TX_STATE_TRANSMITTED)
5757 				continue;
5758 
5759 			if (qid != cmd_hdr->qid)
5760 				continue;
5761 
5762 			txdata = &txq->data[idx];
5763 			if (txdata->m == NULL)
5764 				continue;
5765 
5766 			/* The Tx rate was the same for all subframes. */
5767 			if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5768 			    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5769 				txdata->ampdu_txmcs = initial_rate &
5770 				    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5771 				txdata->ampdu_txnss = ((initial_rate &
5772 				    IWM_RATE_VHT_MCS_NSS_MSK) >>
5773 				    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5774 				txdata->ampdu_nframes = nframes;
5775 			} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5776 				txdata->ampdu_txmcs = initial_rate &
5777 				    (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5778 				    IWM_RATE_HT_MCS_NSS_MSK);
5779 				txdata->ampdu_nframes = nframes;
5780 			}
5781 		}
5782 		return;
5783 	}
5784 
5785 	ba = &ni->ni_tx_ba[tid];
5786 	if (ba->ba_state != IEEE80211_BA_AGREED)
5787 		return;
5788 	if (SEQ_LT(ssn, ba->ba_winstart))
5789 		return;
5790 
5791 	/* This was a final single-frame Tx attempt for frame SSN-1. */
5792 	seq = (ssn - 1) & 0xfff;
5793 
5794 	/*
5795 	 * Skip rate control if our Tx rate is fixed.
5796 	 * Don't report frames to MiRA which were sent at a different
5797 	 * Tx rate than ni->ni_txmcs.
5798 	 */
5799 	if (ic->ic_fixed_mcs == -1) {
5800 		if (txdata->ampdu_nframes > 1) {
5801 			/*
5802 			 * This frame was once part of an A-MPDU.
5803 			 * Report one failed A-MPDU Tx attempt.
5804 			 * The firmware might have made several such
5805 			 * attempts but we don't keep track of this.
5806 			 */
5807 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5808 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5809 				    ic, ni, txdata->ampdu_txmcs,
5810 				    txdata->ampdu_txnss, 1, 1);
5811 			} else {
5812 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5813 				    txdata->ampdu_txmcs, 1, 1);
5814 			}
5815 		}
5816 
5817 		/* Report the final single-frame Tx attempt. */
5818 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
5819 		    (initial_rate & IWM_RATE_MCS_VHT_MSK)) {
5820 			int txmcs = initial_rate &
5821 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK;
5822 			int nss = ((initial_rate &
5823 			    IWM_RATE_VHT_MCS_NSS_MSK) >>
5824 			    IWM_RATE_VHT_MCS_NSS_POS) + 1;
5825 			iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5826 			    failure_frame, txfail);
5827 		} else if (initial_rate & IWM_RATE_MCS_HT_MSK) {
5828 			int txmcs = initial_rate &
5829 			   (IWM_RATE_HT_MCS_RATE_CODE_MSK |
5830 			   IWM_RATE_HT_MCS_NSS_MSK);
5831 			iwm_ht_single_rate_control(sc, ni, txmcs,
5832 			    failure_frame, txfail);
5833 		}
5834 	}
5835 
5836 	if (txfail)
5837 		ieee80211_tx_compressed_bar(ic, ni, tid, ssn);
5838 
5839 	/*
5840 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
5841 	 * in firmware's BA window. Firmware is not going to retransmit any
5842 	 * frames before its BA window so mark them all as done.
5843 	 */
5844 	ieee80211_output_ba_move_window(ic, ni, tid, ssn);
5845 	iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5846 	iwm_clear_oactive(sc, txq);
5847 }
5848 
5849 void
5850 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5851     struct iwm_rx_data *data)
5852 {
5853 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
5854 	int idx = cmd_hdr->idx;
5855 	int qid = cmd_hdr->qid;
5856 	struct iwm_tx_ring *ring = &sc->txq[qid];
5857 	struct iwm_tx_data *txd;
5858 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
5859 	uint32_t ssn;
5860 	uint32_t len = iwm_rx_packet_len(pkt);
5861 
5862 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5863 	    BUS_DMASYNC_POSTREAD);
5864 
5865 	/* Sanity checks. */
5866 	if (sizeof(*tx_resp) > len)
5867 		return;
5868 	if (qid < IWM_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5869 		return;
5870 	if (qid > IWM_LAST_AGG_TX_QUEUE)
5871 		return;
5872 	if (sizeof(*tx_resp) + sizeof(ssn) +
5873 	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5874 		return;
5875 
5876 	sc->sc_tx_timer[qid] = 0;
5877 
5878 	txd = &ring->data[idx];
5879 	if (txd->m == NULL)
5880 		return;
5881 
5882 	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5883 	ssn = le32toh(ssn) & 0xfff;
5884 	if (qid >= IWM_FIRST_AGG_TX_QUEUE) {
5885 		int status;
5886 		status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
5887 		iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5888 		    le32toh(tx_resp->initial_rate), tx_resp->frame_count,
5889 		    tx_resp->failure_frame, ssn, status, &tx_resp->status);
5890 	} else {
5891 		/*
5892 		 * Even though this is not an agg queue, we must only free
5893 		 * frames before the firmware's starting sequence number.
5894 		 */
5895 		iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5896 		iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5897 		iwm_clear_oactive(sc, ring);
5898 	}
5899 }
5900 
5901 void
5902 iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5903 {
5904 	struct ieee80211com *ic = &sc->sc_ic;
5905 	struct ifnet *ifp = IC2IFP(ic);
5906 
5907 	if (ring->queued < IWM_TX_RING_LOMARK) {
5908 		sc->qfullmsk &= ~(1 << ring->qid);
5909 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5910 			ifq_clr_oactive(&ifp->if_snd);
5911 			/*
5912 			 * Well, we're in interrupt context, but then again
5913 			 * I guess net80211 does all sorts of stunts in
5914 			 * interrupt context, so maybe this is no biggie.
5915 			 */
5916 			(*ifp->if_start)(ifp);
5917 		}
5918 	}
5919 }
5920 
5921 void
5922 iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5923     struct iwm_tx_ring *txq, int tid, uint16_t seq, uint16_t ssn)
5924 {
5925 	struct ieee80211com *ic = &sc->sc_ic;
5926 	struct iwm_node *in = (void *)ni;
5927 	int idx, end_idx;
5928 
5929 	/*
5930 	 * Update Tx rate statistics for A-MPDUs before firmware's BA window.
5931 	 */
5932 	idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5933 	end_idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
5934 	while (idx != end_idx) {
5935 		struct iwm_tx_data *txdata = &txq->data[idx];
5936 		if (txdata->m != NULL && txdata->ampdu_nframes > 1) {
5937 			/*
5938 			 * We can assume that this subframe has been ACKed
5939 			 * because ACK failures come as single frames and
5940 			 * before failing an A-MPDU subframe the firmware
5941 			 * sends it as a single frame at least once.
5942 			 */
5943 			if (ni->ni_flags & IEEE80211_NODE_VHT) {
5944 				ieee80211_ra_vht_add_stats(&in->in_rn_vht,
5945 				    ic, ni, txdata->ampdu_txmcs,
5946 				    txdata->ampdu_txnss, 1, 0);
5947 			} else {
5948 				ieee80211_ra_add_stats_ht(&in->in_rn, ic, ni,
5949 				    txdata->ampdu_txmcs, 1, 0);
5950 			}
5951 			/* Report this frame only once. */
5952 			txdata->ampdu_nframes = 0;
5953 		}
5954 
5955 		idx = (idx + 1) % IWM_TX_RING_COUNT;
5956 	}
5957 
5958 	iwm_ra_choose(sc, ni);
5959 }
5960 
5961 void
5962 iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
5963 {
5964 	struct iwm_ba_notif *ban = (void *)pkt->data;
5965 	struct ieee80211com *ic = &sc->sc_ic;
5966 	struct ieee80211_node *ni = ic->ic_bss;
5967 	struct iwm_node *in = (void *)ni;
5968 	struct ieee80211_tx_ba *ba;
5969 	struct iwm_tx_ring *ring;
5970 	uint16_t seq, ssn;
5971 	int qid;
5972 
5973 	if (ic->ic_state != IEEE80211_S_RUN)
5974 		return;
5975 
5976 	if (iwm_rx_packet_payload_len(pkt) < sizeof(*ban))
5977 		return;
5978 
5979 	if (ban->sta_id != IWM_STATION_ID ||
5980 	    !IEEE80211_ADDR_EQ(in->in_macaddr, ban->sta_addr))
5981 		return;
5982 
5983 	qid = le16toh(ban->scd_flow);
5984 	if (qid < IWM_FIRST_AGG_TX_QUEUE || qid > IWM_LAST_AGG_TX_QUEUE)
5985 		return;
5986 
5987 	/* Protect against a firmware bug where the queue/TID are off. */
5988 	if (qid != IWM_FIRST_AGG_TX_QUEUE + ban->tid)
5989 		return;
5990 
5991 	sc->sc_tx_timer[qid] = 0;
5992 
5993 	ba = &ni->ni_tx_ba[ban->tid];
5994 	if (ba->ba_state != IEEE80211_BA_AGREED)
5995 		return;
5996 
5997 	ring = &sc->txq[qid];
5998 
5999 	/*
6000 	 * The first bit in ban->bitmap corresponds to the sequence number
6001 	 * stored in the sequence control field ban->seq_ctl.
6002 	 * Multiple BA notifications in a row may be using this number, with
6003 	 * additional bits being set in ban->bitmap. It is unclear how the
6004 	 * firmware decides to shift this window forward.
6005 	 * We rely on ba->ba_winstart instead.
6006 	 */
6007 	seq = le16toh(ban->seq_ctl) >> IEEE80211_SEQ_SEQ_SHIFT;
6008 
6009 	/*
6010 	 * The firmware's new BA window starting sequence number
6011 	 * corresponds to the first hole in ban->scd_ssn, implying
6012 	 * that all frames between 'seq' and 'ssn' (non-inclusive)
6013 	 * have been acked.
6014 	 */
6015 	ssn = le16toh(ban->scd_ssn);
6016 
6017 	if (SEQ_LT(ssn, ba->ba_winstart))
6018 		return;
6019 
6020 	/* Skip rate control if our Tx rate is fixed. */
6021 	if (ic->ic_fixed_mcs == -1)
6022 		iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
6023 		    ba->ba_winstart, ssn);
6024 
6025 	/*
6026 	 * SSN corresponds to the first (perhaps not yet transmitted) frame
6027 	 * in firmware's BA window. Firmware is not going to retransmit any
6028 	 * frames before its BA window so mark them all as done.
6029 	 */
6030 	ieee80211_output_ba_move_window(ic, ni, ban->tid, ssn);
6031 	iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
6032 	iwm_clear_oactive(sc, ring);
6033 }
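/*
 * Example: with ba_winstart = 10 and scd_ssn = 13, frames 10, 11 and 12
 * are considered acknowledged: the BA window moves forward to start at
 * 13 and the corresponding Tx ring slots are reclaimed above.
 */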
6034 
6035 void
6036 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
6037     struct iwm_rx_data *data)
6038 {
6039 	struct ieee80211com *ic = &sc->sc_ic;
6040 	struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
6041 	uint32_t missed;
6042 
6043 	if ((ic->ic_opmode != IEEE80211_M_STA) ||
6044 	    (ic->ic_state != IEEE80211_S_RUN))
6045 		return;
6046 
6047 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
6048 	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
6049 
6050 	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
6051 	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
6052 		if (ic->ic_if.if_flags & IFF_DEBUG)
6053 			printf("%s: receiving no beacons from %s; checking if "
6054 			    "this AP is still responding to probe requests\n",
6055 			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
6056 		/*
6057 		 * Rather than go directly to scan state, try to send a
6058 		 * directed probe request first. If that fails then the
6059 		 * state machine will drop us into scanning after timing
6060 		 * out waiting for a probe response.
6061 		 */
6062 		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
6063 		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
6064 	}
6065 
6066 }
6067 
6068 int
6069 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
6070 {
6071 	struct iwm_binding_cmd cmd;
6072 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
6073 	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
6074 	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
6075 	uint32_t status;
6076 	size_t len;
6077 
6078 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
6079 		panic("binding already added");
6080 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
6081 		panic("binding already removed");
6082 
6083 	if (phyctxt == NULL) /* XXX race with iwm_stop() */
6084 		return EINVAL;
6085 
6086 	memset(&cmd, 0, sizeof(cmd));
6087 
6088 	cmd.id_and_color
6089 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6090 	cmd.action = htole32(action);
6091 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
6092 
6093 	cmd.macs[0] = htole32(mac_id);
6094 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
6095 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
6096 
6097 	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
6098 	    !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
6099 		cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
6100 	else
6101 		cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);
6102 
6103 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
6104 		len = sizeof(cmd);
6105 	else
6106 		len = sizeof(struct iwm_binding_cmd_v1);
6107 	status = 0;
6108 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
6109 	    &status);
6110 	if (err == 0 && status != 0)
6111 		err = EIO;
6112 
6113 	return err;
6114 }
6115 
6116 void
6117 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6118     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
6119 {
6120 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
6121 
6122 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6123 	    ctxt->color));
6124 	cmd->action = htole32(action);
6125 	cmd->apply_time = htole32(apply_time);
6126 }
6127 
6128 void
6129 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
6130     struct ieee80211_channel *chan, uint8_t chains_static,
6131     uint8_t chains_dynamic, uint8_t sco, uint8_t vht_chan_width)
6132 {
6133 	struct ieee80211com *ic = &sc->sc_ic;
6134 	uint8_t active_cnt, idle_cnt;
6135 
6136 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6137 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6138 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
6139 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6140 		cmd->ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6141 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6142 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6143 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6144 			/* secondary chan above -> control chan below */
6145 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6146 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6147 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6148 			/* secondary chan below -> control chan above */
6149 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6150 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6151 		} else {
6152 			cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6153 			cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6154 		}
6155 	} else {
6156 		cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6157 		cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6158 	}
6159 
6160 	/* Set the Rx chains. */
6161 	idle_cnt = chains_static;
6162 	active_cnt = chains_dynamic;
6163 
6164 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6165 					IWM_PHY_RX_CHAIN_VALID_POS);
6166 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6167 	cmd->rxchain_info |= htole32(active_cnt <<
6168 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6169 
6170 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6171 }
6172 
6173 uint8_t
6174 iwm_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
6175 {
6176 	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
6177 	int primary_idx = ic->ic_bss->ni_primary_chan;
6178 	/*
6179 	 * The FW is expected to check the control channel position only
6180 	 * when in HT/VHT and the channel width is not 20MHz. Return
6181 	 * this value as the default one:
6182 	 */
6183 	uint8_t pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6184 
6185 	switch (primary_idx - center_idx) {
6186 	case -6:
6187 		pos = IWM_PHY_VHT_CTRL_POS_2_BELOW;
6188 		break;
6189 	case -2:
6190 		pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6191 		break;
6192 	case 2:
6193 		pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6194 		break;
6195 	case 6:
6196 		pos = IWM_PHY_VHT_CTRL_POS_2_ABOVE;
6197 		break;
6198 	default:
6199 		break;
6200 	}
6201 
6202 	return pos;
6203 }
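/*
 * Example: an 80MHz channel spanning channels 36-48 has center index 42.
 * A primary channel of 36 yields 36 - 42 = -6 (control channel two below
 * the center), 40 yields -2, 44 yields +2 and 48 yields +6, covering all
 * four 20MHz positions within the 80MHz block.
 */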
6204 
6205 int
6206 iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6207     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6208     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6209 {
6210 	struct ieee80211com *ic = &sc->sc_ic;
6211 	struct iwm_phy_context_cmd_uhb cmd;
6212 	uint8_t active_cnt, idle_cnt;
6213 	struct ieee80211_channel *chan = ctxt->channel;
6214 
6215 	memset(&cmd, 0, sizeof(cmd));
6216 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
6217 	    ctxt->color));
6218 	cmd.action = htole32(action);
6219 	cmd.apply_time = htole32(apply_time);
6220 
6221 	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
6222 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
6223 	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
6224 	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
6225 		cmd.ci.ctrl_pos = iwm_get_vht_ctrl_pos(ic, chan);
6226 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE80;
6227 	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
6228 		if (sco == IEEE80211_HTOP0_SCO_SCA) {
6229 			/* secondary chan above -> control chan below */
6230 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6231 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6232 		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
6233 			/* secondary chan below -> control chan above */
6234 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_ABOVE;
6235 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE40;
6236 		} else {
6237 			cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6238 			cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6239 		}
6240 	} else {
6241 		cmd.ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
6242 		cmd.ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
6243 	}
6244 
6245 	idle_cnt = chains_static;
6246 	active_cnt = chains_dynamic;
6247 	cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6248 					IWM_PHY_RX_CHAIN_VALID_POS);
6249 	cmd.rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
6250 	cmd.rxchain_info |= htole32(active_cnt <<
6251 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
6252 	cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6253 
6254 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6255 }
6256 
6257 int
6258 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6259     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
6260     uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
6261 {
6262 	struct iwm_phy_context_cmd cmd;
6263 
6264 	/*
6265 	 * Intel increased the size of the fw_channel_info struct and neglected
6266 	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
6267 	 * member in the middle.
6268 	 * To keep things simple we use a separate function to handle the larger
6269 	 * variant of the phy context command.
6270 	 */
6271 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6272 		return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6273 		    chains_dynamic, action, apply_time, sco, vht_chan_width);
6274 
6275 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6276 
6277 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6278 	    chains_static, chains_dynamic, sco, vht_chan_width);
6279 
6280 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6281 	    sizeof(struct iwm_phy_context_cmd), &cmd);
6282 }
6283 
6284 int
6285 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6286 {
6287 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6288 	struct iwm_tfd *desc;
6289 	struct iwm_tx_data *txdata;
6290 	struct iwm_device_cmd *cmd;
6291 	struct mbuf *m;
6292 	bus_addr_t paddr;
6293 	uint32_t addr_lo;
6294 	int err = 0, i, paylen, off, s;
6295 	int idx, code, async, group_id;
6296 	size_t hdrlen, datasz;
6297 	uint8_t *data;
6298 	int generation = sc->sc_generation;
6299 
6300 	code = hcmd->id;
6301 	async = hcmd->flags & IWM_CMD_ASYNC;
6302 	idx = ring->cur;
6303 
6304 	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
6305 		paylen += hcmd->len[i];
6306 	}
6307 
6308 	/* If this command waits for a response, allocate response buffer. */
6309 	hcmd->resp_pkt = NULL;
6310 	if (hcmd->flags & IWM_CMD_WANT_RESP) {
6311 		uint8_t *resp_buf;
6312 		KASSERT(!async);
6313 		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
6314 		KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
6315 		if (sc->sc_cmd_resp_pkt[idx] != NULL)
6316 			return ENOSPC;
6317 		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
6318 		    M_NOWAIT | M_ZERO);
6319 		if (resp_buf == NULL)
6320 			return ENOMEM;
6321 		sc->sc_cmd_resp_pkt[idx] = resp_buf;
6322 		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6323 	} else {
6324 		sc->sc_cmd_resp_pkt[idx] = NULL;
6325 	}
6326 
6327 	s = splnet();
6328 
6329 	desc = &ring->desc[idx];
6330 	txdata = &ring->data[idx];
6331 
6332 	group_id = iwm_cmd_groupid(code);
6333 	if (group_id != 0) {
6334 		hdrlen = sizeof(cmd->hdr_wide);
6335 		datasz = sizeof(cmd->data_wide);
6336 	} else {
6337 		hdrlen = sizeof(cmd->hdr);
6338 		datasz = sizeof(cmd->data);
6339 	}
6340 
6341 	if (paylen > datasz) {
6342 		/* Command is too large to fit in pre-allocated space. */
6343 		size_t totlen = hdrlen + paylen;
6344 		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
6345 			printf("%s: firmware command too long (%zd bytes)\n",
6346 			    DEVNAME(sc), totlen);
6347 			err = EINVAL;
6348 			goto out;
6349 		}
6350 		m = MCLGETL(NULL, M_DONTWAIT, totlen);
6351 		if (m == NULL) {
6352 			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
6353 			    DEVNAME(sc), totlen);
6354 			err = ENOMEM;
6355 			goto out;
6356 		}
6357 		cmd = mtod(m, struct iwm_device_cmd *);
6358 		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6359 		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6360 		if (err) {
6361 			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
6362 			    DEVNAME(sc), totlen);
6363 			m_freem(m);
6364 			goto out;
6365 		}
6366 		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
6367 		paddr = txdata->map->dm_segs[0].ds_addr;
6368 	} else {
6369 		cmd = &ring->cmd[idx];
6370 		paddr = txdata->cmd_paddr;
6371 	}
6372 
6373 	if (group_id != 0) {
6374 		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
6375 		cmd->hdr_wide.group_id = group_id;
6376 		cmd->hdr_wide.qid = ring->qid;
6377 		cmd->hdr_wide.idx = idx;
6378 		cmd->hdr_wide.length = htole16(paylen);
6379 		cmd->hdr_wide.version = iwm_cmd_version(code);
6380 		data = cmd->data_wide;
6381 	} else {
6382 		cmd->hdr.code = code;
6383 		cmd->hdr.flags = 0;
6384 		cmd->hdr.qid = ring->qid;
6385 		cmd->hdr.idx = idx;
6386 		data = cmd->data;
6387 	}
6388 
6389 	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
6390 		if (hcmd->len[i] == 0)
6391 			continue;
6392 		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
6393 		off += hcmd->len[i];
6394 	}
6395 	KASSERT(off == paylen);
6396 
6397 	/* lo field is not aligned */
6398 	addr_lo = htole32((uint32_t)paddr);
6399 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
6400 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
6401 	    | ((hdrlen + paylen) << 4));
6402 	desc->num_tbs = 1;
6403 
6404 	if (paylen > datasz) {
6405 		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6406 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6407 	} else {
6408 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6409 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6410 		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
6411 	}
6412 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6413 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6414 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6415 
6416 	/*
6417 	 * Wake up the NIC to make sure that the firmware will see the host
6418 	 * command - we will let the NIC sleep once all the host commands
6419 	 * returned. This needs to be done only on 7000 family NICs.
6420 	 */
6421 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6422 		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6423 			err = EBUSY;
6424 			goto out;
6425 		}
6426 	}
6427 
6428 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6429 
6430 	/* Kick command ring. */
6431 	ring->queued++;
6432 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
6433 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6434 
6435 	if (!async) {
6436 		err = tsleep_nsec(desc, PCATCH, "iwmcmd", SEC_TO_NSEC(1));
6437 		if (err == 0) {
6438 			/* if hardware is no longer up, return error */
6439 			if (generation != sc->sc_generation) {
6440 				err = ENXIO;
6441 				goto out;
6442 			}
6443 
6444 			/* Response buffer will be freed in iwm_free_resp(). */
6445 			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6446 			sc->sc_cmd_resp_pkt[idx] = NULL;
6447 		} else if (generation == sc->sc_generation) {
6448 			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6449 			    sc->sc_cmd_resp_len[idx]);
6450 			sc->sc_cmd_resp_pkt[idx] = NULL;
6451 		}
6452 	}
6453  out:
6454 	splx(s);
6455 
6456 	return err;
6457 }
6458 
6459 int
6460 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6461     uint16_t len, const void *data)
6462 {
6463 	struct iwm_host_cmd cmd = {
6464 		.id = id,
6465 		.len = { len, },
6466 		.data = { data, },
6467 		.flags = flags,
6468 	};
6469 
6470 	return iwm_send_cmd(sc, &cmd);
6471 }
6472 
6473 int
6474 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6475     uint32_t *status)
6476 {
6477 	struct iwm_rx_packet *pkt;
6478 	struct iwm_cmd_response *resp;
6479 	int err, resp_len;
6480 
6481 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
6482 	cmd->flags |= IWM_CMD_WANT_RESP;
6483 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
6484 
6485 	err = iwm_send_cmd(sc, cmd);
6486 	if (err)
6487 		return err;
6488 
6489 	pkt = cmd->resp_pkt;
6490 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
6491 		return EIO;
6492 
6493 	resp_len = iwm_rx_packet_payload_len(pkt);
6494 	if (resp_len != sizeof(*resp)) {
6495 		iwm_free_resp(sc, cmd);
6496 		return EIO;
6497 	}
6498 
6499 	resp = (void *)pkt->data;
6500 	*status = le32toh(resp->status);
6501 	iwm_free_resp(sc, cmd);
6502 	return err;
6503 }
6504 
6505 int
6506 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6507     const void *data, uint32_t *status)
6508 {
6509 	struct iwm_host_cmd cmd = {
6510 		.id = id,
6511 		.len = { len, },
6512 		.data = { data, },
6513 	};
6514 
6515 	return iwm_send_cmd_status(sc, &cmd, status);
6516 }
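/*
 * A typical synchronous caller looks like iwm_binding_cmd() above:
 *
 *	status = 0;
 *	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len,
 *	    &cmd, &status);
 *	if (err == 0 && status != 0)
 *		err = EIO;
 *
 * A zero return value only means the command was delivered; the
 * firmware's own verdict comes back in 'status'.
 */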
6517 
6518 void
6519 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6520 {
6521 	KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
6522 	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
6523 	hcmd->resp_pkt = NULL;
6524 }
6525 
6526 void
6527 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6528 {
6529 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6530 	struct iwm_tx_data *data;
6531 
6532 	if (qid != sc->cmdqid) {
6533 		return;	/* Not a command ack. */
6534 	}
6535 
6536 	data = &ring->data[idx];
6537 
6538 	if (data->m != NULL) {
6539 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6540 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6541 		bus_dmamap_unload(sc->sc_dmat, data->map);
6542 		m_freem(data->m);
6543 		data->m = NULL;
6544 	}
6545 	wakeup(&ring->desc[idx]);
6546 
6547 	if (ring->queued == 0) {
6548 		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6549 		    DEVNAME(sc), code));
6550 	} else if (--ring->queued == 0) {
6551 		/*
6552 		 * 7000 family NICs are locked while commands are in progress.
6553 		 * All commands are now done so we may unlock the NIC again.
6554 		 */
6555 		if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6556 			iwm_nic_unlock(sc);
6557 	}
6558 }
6559 
6560 void
6561 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6562     uint16_t len)
6563 {
6564 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6565 	uint16_t val;
6566 
6567 	scd_bc_tbl = sc->sched_dma.vaddr;
6568 
6569 	len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
6570 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6571 		len = roundup(len, 4) / 4;
6572 
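	/*
	 * Byte-count table entries carry the station ID in the top four
	 * bits and the frame length in the low twelve; firmware with
	 * IWM_UCODE_TLV_FLAGS_DW_BC_TABLE expects the length in dwords.
	 */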
6573 	val = htole16(sta_id << 12 | len);
6574 
6575 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6576 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6577 
6578 	/* Update TX scheduler. */
6579 	scd_bc_tbl[qid].tfd_offset[idx] = val;
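	/*
	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are mirrored beyond
	 * IWM_TFD_QUEUE_SIZE_MAX so that reads which run past the ring
	 * wrap-around still see a valid byte count.
	 */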
6580 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6581 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6582 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6583 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6584 }
6585 
6586 void
6587 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6588 {
6589 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
6590 	uint16_t val;
6591 
6592 	scd_bc_tbl = sc->sched_dma.vaddr;
6593 
6594 	val = htole16(1 | (sta_id << 12));
6595 
6596 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6597 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6598 
6599 	/* Update TX scheduler. */
6600 	scd_bc_tbl[qid].tfd_offset[idx] = val;
6601 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6602 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6603 
6604 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6605 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6606 }
6607 
6608 /*
6609  * Fill in various bits for management frames, and leave them
6610  * unfilled for data frames (firmware takes care of that).
6611  * Return the selected legacy TX rate, or zero if HT/VHT is used.
6612  */
6613 uint8_t
6614 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6615     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
6616 {
6617 	struct ieee80211com *ic = &sc->sc_ic;
6618 	struct ieee80211_node *ni = &in->in_ni;
6619 	const struct iwm_rate *rinfo;
6620 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6621 	int min_ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
6622 	int ridx, rate_flags;
6623 	uint8_t rate = 0;
6624 
6625 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
6626 	tx->data_retry_limit = IWM_LOW_RETRY_LIMIT;
6627 
6628 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6629 	    type != IEEE80211_FC0_TYPE_DATA) {
6630 		/* for non-data, use the lowest supported rate */
6631 		ridx = min_ridx;
6632 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
6633 	} else if (ic->ic_fixed_mcs != -1) {
6634 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6635 			ridx = IWM_FIRST_OFDM_RATE;
6636 		else
6637 			ridx = sc->sc_fixed_ridx;
6638 	} else if (ic->ic_fixed_rate != -1) {
6639 		ridx = sc->sc_fixed_ridx;
6640 	} else {
6641 		int i;
6642 		/* Use firmware rateset retry table. */
6643 		tx->initial_rate_index = 0;
6644 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
6645 		if (ni->ni_flags & IEEE80211_NODE_HT) /* VHT implies HT */
6646 			return 0;
6647 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6648 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
6649 		for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
6650 			if (iwm_rates[i].rate == (ni->ni_txrate &
6651 			    IEEE80211_RATE_VAL)) {
6652 				ridx = i;
6653 				break;
6654 			}
6655 		}
6656 		return iwm_rates[ridx].rate & 0xff;
6657 	}
6658 
6659 	rinfo = &iwm_rates[ridx];
6660 	if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0 &&
6661 	    iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
6662 		rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
6663 	else
6664 		rate_flags = iwm_valid_siso_ant_rate_mask(sc);
6665 	if (IWM_RIDX_IS_CCK(ridx))
6666 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
6667 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6668 	    type == IEEE80211_FC0_TYPE_DATA &&
6669 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6670 		uint8_t sco = IEEE80211_HTOP0_SCO_SCN;
6671 		uint8_t vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
6672 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
6673 		    IEEE80211_CHAN_80MHZ_ALLOWED(ni->ni_chan) &&
6674 		    ieee80211_node_supports_vht_chan80(ni))
6675 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
6676 		else if (IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
6677 		    ieee80211_node_supports_ht_chan40(ni))
6678 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
6679 		if (ni->ni_flags & IEEE80211_NODE_VHT)
6680 			rate_flags |= IWM_RATE_MCS_VHT_MSK;
6681 		else
6682 			rate_flags |= IWM_RATE_MCS_HT_MSK;
6683 		if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80 &&
6684 		    in->in_phyctxt != NULL &&
6685 		    in->in_phyctxt->vht_chan_width == vht_chan_width) {
6686 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_80;
6687 			if (ieee80211_node_supports_vht_sgi80(ni))
6688 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6689 		} else if ((sco == IEEE80211_HTOP0_SCO_SCA ||
6690 		    sco == IEEE80211_HTOP0_SCO_SCB) &&
6691 		    in->in_phyctxt != NULL && in->in_phyctxt->sco == sco) {
6692 			rate_flags |= IWM_RATE_MCS_CHAN_WIDTH_40;
6693 			if (ieee80211_node_supports_ht_sgi40(ni))
6694 				rate_flags |= IWM_RATE_MCS_SGI_MSK;
6695 		} else if (ieee80211_node_supports_ht_sgi20(ni))
6696 			rate_flags |= IWM_RATE_MCS_SGI_MSK;
6697 		if (ni->ni_flags & IEEE80211_NODE_VHT) {
6698 			/*
6699 			 * ifmedia only provides an MCS index, no NSS.
6700 			 * Use a fixed SISO rate.
6701 			 */
6702 			tx->rate_n_flags = htole32(rate_flags |
6703 			    (ic->ic_fixed_mcs &
6704 			    IWM_RATE_VHT_MCS_RATE_CODE_MSK));
6705 		} else
6706 			tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
6707 	} else
6708 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
6709 
6710 	return rate;
6711 }
6712 
6713 #define TB0_SIZE 16
6714 int
6715 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6716 {
6717 	struct ieee80211com *ic = &sc->sc_ic;
6718 	struct iwm_node *in = (void *)ni;
6719 	struct iwm_tx_ring *ring;
6720 	struct iwm_tx_data *data;
6721 	struct iwm_tfd *desc;
6722 	struct iwm_device_cmd *cmd;
6723 	struct iwm_tx_cmd *tx;
6724 	struct ieee80211_frame *wh;
6725 	struct ieee80211_key *k = NULL;
6726 	uint8_t rate;
6727 	uint8_t *ivp;
6728 	uint32_t flags;
6729 	u_int hdrlen;
6730 	bus_dma_segment_t *seg;
6731 	uint8_t tid, type, subtype;
6732 	int i, totlen, err, pad;
6733 	int qid, hasqos;
6734 
6735 	wh = mtod(m, struct ieee80211_frame *);
6736 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6737 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6738 	if (type == IEEE80211_FC0_TYPE_CTL)
6739 		hdrlen = sizeof(struct ieee80211_frame_min);
6740 	else
6741 		hdrlen = ieee80211_get_hdrlen(wh);
6742 
6743 	hasqos = ieee80211_has_qos(wh);
6744 	if (type == IEEE80211_FC0_TYPE_DATA)
6745 		tid = IWM_TID_NON_QOS;
6746 	else
6747 		tid = IWM_MAX_TID_COUNT;
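	/*
	 * Default TID; overridden below for QoS frames placed on an
	 * aggregation queue and for BAR frames.
	 */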
6748 
6749 	/*
6750 	 * Map EDCA categories to Tx data queues.
6751 	 *
6752 	 * We use static data queue assignments even in DQA mode. We do not
6753 	 * need to share Tx queues between stations because we only implement
6754 	 * client mode; the firmware's station table contains only one entry
6755 	 * which represents our access point.
6756 	 */
6757 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6758 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6759 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
6760 		else
6761 			qid = IWM_AUX_QUEUE;
6762 	} else if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6763 		qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
6764 	else
6765 		qid = ac;
6766 
6767 	/* If possible, put this frame on an aggregation queue. */
6768 	if (hasqos) {
6769 		struct ieee80211_tx_ba *ba;
6770 		uint16_t qos = ieee80211_get_qos(wh);
6771 		int qostid = qos & IEEE80211_QOS_TID;
6772 		int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;
6773 
6774 		ba = &ni->ni_tx_ba[qostid];
6775 		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6776 		    type == IEEE80211_FC0_TYPE_DATA &&
6777 		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6778 		    (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6779 		    ba->ba_state == IEEE80211_BA_AGREED) {
6780 			qid = agg_qid;
6781 			tid = qostid;
6782 			ac = ieee80211_up_to_ac(ic, qostid);
6783 		}
6784 	}
6785 
6786 	ring = &sc->txq[qid];
6787 	desc = &ring->desc[ring->cur];
6788 	memset(desc, 0, sizeof(*desc));
6789 	data = &ring->data[ring->cur];
6790 
6791 	cmd = &ring->cmd[ring->cur];
6792 	cmd->hdr.code = IWM_TX_CMD;
6793 	cmd->hdr.flags = 0;
6794 	cmd->hdr.qid = ring->qid;
6795 	cmd->hdr.idx = ring->cur;
6796 
6797 	tx = (void *)cmd->data;
6798 	memset(tx, 0, sizeof(*tx));
6799 
6800 	rate = iwm_tx_fill_cmd(sc, in, wh, tx);
6801 
6802 #if NBPFILTER > 0
6803 	if (sc->sc_drvbpf != NULL) {
6804 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6805 		uint16_t chan_flags;
6806 
6807 		tap->wt_flags = 0;
6808 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6809 		chan_flags = ni->ni_chan->ic_flags;
6810 		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6811 		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6812 			chan_flags &= ~IEEE80211_CHAN_HT;
6813 			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6814 		}
6815 		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6816 			chan_flags &= ~IEEE80211_CHAN_VHT;
6817 		tap->wt_chan_flags = htole16(chan_flags);
6818 		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6819 		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6820 		    type == IEEE80211_FC0_TYPE_DATA) {
6821 			tap->wt_rate = (0x80 | ni->ni_txmcs);
6822 		} else
6823 			tap->wt_rate = rate;
6824 		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6825 		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6826 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6827 
6828 		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6829 		    m, BPF_DIRECTION_OUT);
6830 	}
6831 #endif
6832 	totlen = m->m_pkthdr.len;
6833 
6834 	if (ic->ic_opmode != IEEE80211_M_MONITOR &&
6835 	    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
6836 		k = ieee80211_get_txkey(ic, wh, ni);
6837 		if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6838 		    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
6839 			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6840 				return ENOBUFS;
6841 			/* 802.11 header may have moved. */
6842 			wh = mtod(m, struct ieee80211_frame *);
6843 			totlen = m->m_pkthdr.len;
6844 			k = NULL; /* skip hardware crypto below */
6845 		} else {
6846 			/* HW appends CCMP MIC */
6847 			totlen += IEEE80211_CCMP_HDRLEN;
6848 		}
6849 	}
6850 
6851 	flags = 0;
6852 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
6853 		flags |= IWM_TX_CMD_FLG_ACK;
6854 	}
6855 
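	/*
	 * Protect unicast data frames beyond the RTS threshold, or all of
	 * them if ERP protection is in use.
	 */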
6856 	if (type == IEEE80211_FC0_TYPE_DATA &&
6857 	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6858 	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
6859 	    (ic->ic_flags & IEEE80211_F_USEPROT)))
6860 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
6861 
6862 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6863 		tx->sta_id = IWM_MONITOR_STA_ID;
6864 	else
6865 		tx->sta_id = IWM_STATION_ID;
6866 
6867 	if (type == IEEE80211_FC0_TYPE_MGT) {
6868 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
6869 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
6870 			tx->pm_frame_timeout = htole16(3);
6871 		else
6872 			tx->pm_frame_timeout = htole16(2);
6873 	} else {
6874 		if (type == IEEE80211_FC0_TYPE_CTL &&
6875 		    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
6876 			struct ieee80211_frame_min *mwh;
6877 			uint8_t *barfrm;
6878 			uint16_t ctl;
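			/* Recover the TID from the BAR's BA control field. */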
6879 			mwh = mtod(m, struct ieee80211_frame_min *);
6880 			barfrm = (uint8_t *)&mwh[1];
6881 			ctl = LE_READ_2(barfrm);
6882 			tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
6883 			    IEEE80211_BA_TID_INFO_SHIFT;
6884 			flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
6885 			tx->data_retry_limit = IWM_BAR_DFAULT_RETRY_LIMIT;
6886 		}
6887 
6888 		tx->pm_frame_timeout = htole16(0);
6889 	}
6890 
6891 	if (hdrlen & 3) {
6892 		/* First segment length must be a multiple of 4. */
6893 		flags |= IWM_TX_CMD_FLG_MH_PAD;
6894 		tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
6895 		pad = 4 - (hdrlen & 3);
6896 	} else
6897 		pad = 0;
6898 
6899 	tx->len = htole16(totlen);
6900 	tx->tid_tspec = tid;
6901 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
6902 
6903 	/* Set physical address of "scratch area". */
6904 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
6905 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
6906 
6907 	/* Copy 802.11 header in TX command. */
6908 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
6909 
6910 	if (k != NULL && k->k_cipher == IEEE80211_CIPHER_CCMP) {
6911 		/* Trim 802.11 header and prepend CCMP IV. */
6912 		m_adj(m, hdrlen - IEEE80211_CCMP_HDRLEN);
6913 		ivp = mtod(m, u_int8_t *);
6914 		k->k_tsc++;	/* increment the 48-bit PN */
6915 		ivp[0] = k->k_tsc; /* PN0 */
6916 		ivp[1] = k->k_tsc >> 8; /* PN1 */
6917 		ivp[2] = 0;        /* Rsvd */
6918 		ivp[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
6919 		ivp[4] = k->k_tsc >> 16; /* PN2 */
6920 		ivp[5] = k->k_tsc >> 24; /* PN3 */
6921 		ivp[6] = k->k_tsc >> 32; /* PN4 */
6922 		ivp[7] = k->k_tsc >> 40; /* PN5 */
6923 
6924 		tx->sec_ctl = IWM_TX_CMD_SEC_CCM;
6925 		memcpy(tx->key, k->k_key, MIN(sizeof(tx->key), k->k_len));
6926 		/* TX scheduler includes CCMP MIC length. */
6927 		totlen += IEEE80211_CCMP_MICLEN;
6928 	} else {
6929 		/* Trim 802.11 header. */
6930 		m_adj(m, hdrlen);
6931 		tx->sec_ctl = 0;
6932 	}
6933 
6934 	flags |= IWM_TX_CMD_FLG_BT_DIS;
6935 	if (!hasqos)
6936 		flags |= IWM_TX_CMD_FLG_SEQ_CTL;
6937 
6938 	tx->tx_flags |= htole32(flags);
6939 
6940 	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6941 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6942 	if (err && err != EFBIG) {
6943 		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6944 		m_freem(m);
6945 		return err;
6946 	}
6947 	if (err) {
6948 #ifdef __FreeBSD_version
6949 		/* Too many DMA segments, linearize mbuf. */
6950 		struct mbuf *m1 = m_collapse(m, M_NOWAIT, IWM_NUM_OF_TBS - 2);
6951 		if (m1 == NULL) {
6952 			device_printf(sc->sc_dev,
6953 				"%s: could not defrag mbuf\n", __func__);
6954 			m_freem(m);
6955 			return (ENOBUFS);
6956 		}
6957 		m = m1;
6958 #else
6959 		/* Too many DMA segments, linearize mbuf. */
6960 		if (m_defrag(m, M_DONTWAIT)) {
6961 			m_freem(m);
6962 			return ENOBUFS;
6963 		}
6964 #endif
6965 		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6966 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6967 		if (err) {
6968 			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6969 			    err);
6970 			m_freem(m);
6971 			return err;
6972 		}
6973 	}
6974 	data->m = m;
6975 	data->in = in;
6976 	data->txmcs = ni->ni_txmcs;
6977 	data->txrate = ni->ni_txrate;
6978 	data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
6979 	data->ampdu_txnss = ni->ni_vht_ss; /* updated upon Tx interrupt */
6980 
6981 	/* Fill TX descriptor. */
6982 	desc->num_tbs = 2 + data->map->dm_nsegs;
6983 
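	/*
	 * TB0 maps the first 16 bytes of the TX command; TB1 maps the rest
	 * of the command plus the (padded) 802.11 header. The mbuf's DMA
	 * segments follow as TB2 and up.
	 */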
6984 	desc->tbs[0].lo = htole32(data->cmd_paddr);
6985 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6986 	    (TB0_SIZE << 4));
6987 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
6988 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
6989 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
6990 	      + hdrlen + pad - TB0_SIZE) << 4));
6991 
6992 	/* Other DMA segments are for data payload. */
6993 	seg = data->map->dm_segs;
6994 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6995 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
6996 		desc->tbs[i+2].hi_n_len =
6997 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
6998 		    | ((seg->ds_len) << 4));
6999 	}
7000 
7001 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
7002 	    BUS_DMASYNC_PREWRITE);
7003 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
7004 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
7005 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
7006 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
7007 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
7008 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
7009 
7010 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
7011 
7012 	/* Kick TX ring. */
7013 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
7014 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
7015 
7016 	/* Mark TX ring as full if we reach a certain threshold. */
7017 	if (++ring->queued > IWM_TX_RING_HIMARK) {
7018 		sc->qfullmsk |= 1 << ring->qid;
7019 	}
7020 
7021 	if (ic->ic_if.if_flags & IFF_UP)
7022 		sc->sc_tx_timer[ring->qid] = 15;
7023 
7024 	return 0;
7025 }
7026 
7027 int
7028 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
7029 {
7030 	struct iwm_tx_path_flush_cmd flush_cmd = {
7031 		.sta_id = htole32(IWM_STATION_ID),
7032 		.tid_mask = htole16(0xffff),
7033 	};
7034 	int err;
7035 
7036 	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
7037 	    sizeof(flush_cmd), &flush_cmd);
7038 	if (err)
7039 		printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
7040 	return err;
7041 }
7042 
7043 #define IWM_FLUSH_WAIT_MS	2000
7044 
7045 int
7046 iwm_wait_tx_queues_empty(struct iwm_softc *sc)
7047 {
7048 	int i, err;
7049 
7050 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
7051 		struct iwm_tx_ring *ring = &sc->txq[i];
7052 
7053 		if (i == sc->cmdqid)
7054 			continue;
7055 
7056 		while (ring->queued > 0) {
7057 			err = tsleep_nsec(ring, 0, "iwmflush",
7058 			    MSEC_TO_NSEC(IWM_FLUSH_WAIT_MS));
7059 			if (err)
7060 				return err;
7061 		}
7062 	}
7063 
7064 	return 0;
7065 }
7066 
7067 void
7068 iwm_led_enable(struct iwm_softc *sc)
7069 {
7070 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
7071 }
7072 
7073 void
7074 iwm_led_disable(struct iwm_softc *sc)
7075 {
7076 	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
7077 }
7078 
7079 int
7080 iwm_led_is_enabled(struct iwm_softc *sc)
7081 {
7082 	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
7083 }
7084 
7085 #define IWM_LED_BLINK_TIMEOUT_MSEC    200
7086 
7087 void
7088 iwm_led_blink_timeout(void *arg)
7089 {
7090 	struct iwm_softc *sc = arg;
7091 
7092 	if (iwm_led_is_enabled(sc))
7093 		iwm_led_disable(sc);
7094 	else
7095 		iwm_led_enable(sc);
7096 
7097 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7098 }
7099 
7100 void
7101 iwm_led_blink_start(struct iwm_softc *sc)
7102 {
7103 	timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7104 	iwm_led_enable(sc);
7105 }
7106 
7107 void
7108 iwm_led_blink_stop(struct iwm_softc *sc)
7109 {
7110 	timeout_del(&sc->sc_led_blink_to);
7111 	iwm_led_disable(sc);
7112 }
7113 
7114 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
7115 
7116 int
7117 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
7118     struct iwm_beacon_filter_cmd *cmd)
7119 {
7120 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
7121 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
7122 }
7123 
7124 void
7125 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
7126     struct iwm_beacon_filter_cmd *cmd)
7127 {
7128 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
7129 }
7130 
7131 int
7132 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
7133 {
7134 	struct iwm_beacon_filter_cmd cmd = {
7135 		IWM_BF_CMD_CONFIG_DEFAULTS,
7136 		.bf_enable_beacon_filter = htole32(1),
7137 		.ba_enable_beacon_abort = htole32(enable),
7138 	};
7139 
7140 	if (!sc->sc_bf.bf_enabled)
7141 		return 0;
7142 
7143 	sc->sc_bf.ba_enabled = enable;
7144 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7145 	return iwm_beacon_filter_send_cmd(sc, &cmd);
7146 }
7147 
7148 void
7149 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
7150     struct iwm_mac_power_cmd *cmd)
7151 {
7152 	struct ieee80211com *ic = &sc->sc_ic;
7153 	struct ieee80211_node *ni = &in->in_ni;
7154 	int dtim_period, dtim_msec, keep_alive;
7155 
7156 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7157 	    in->in_color));
7158 	if (ni->ni_dtimperiod)
7159 		dtim_period = ni->ni_dtimperiod;
7160 	else
7161 		dtim_period = 1;
7162 
7163 	/*
7164 	 * Regardless of power management state, the driver must set the
7165 	 * keep-alive period. The firmware uses it to send keep-alive NDPs
7166 	 * immediately after association. Ensure the keep-alive period is
7167 	 * at least 3 * DTIM.
7168 	 */
7169 	dtim_msec = dtim_period * ni->ni_intval;
7170 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
7171 	keep_alive = roundup(keep_alive, 1000) / 1000;
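	/*
	 * Example: a DTIM period of 1 with a 100ms beacon interval gives
	 * 300ms, so the 25-second floor wins and keep_alive becomes 25.
	 */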
7172 	cmd->keep_alive_seconds = htole16(keep_alive);
7173 
7174 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7175 		cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7176 }
7177 
7178 int
7179 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
7180 {
7181 	int err;
7182 	int ba_enable;
7183 	struct iwm_mac_power_cmd cmd;
7184 
7185 	memset(&cmd, 0, sizeof(cmd));
7186 
7187 	iwm_power_build_cmd(sc, in, &cmd);
7188 
7189 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
7190 	    sizeof(cmd), &cmd);
7191 	if (err != 0)
7192 		return err;
7193 
7194 	ba_enable = !!(cmd.flags &
7195 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
7196 	return iwm_update_beacon_abort(sc, in, ba_enable);
7197 }
7198 
7199 int
7200 iwm_power_update_device(struct iwm_softc *sc)
7201 {
7202 	struct iwm_device_power_cmd cmd = { };
7203 	struct ieee80211com *ic = &sc->sc_ic;
7204 
7205 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
7206 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
7207 
7208 	return iwm_send_cmd_pdu(sc,
7209 	    IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
7210 }
7211 
7212 int
7213 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
7214 {
7215 	struct iwm_beacon_filter_cmd cmd = {
7216 		IWM_BF_CMD_CONFIG_DEFAULTS,
7217 		.bf_enable_beacon_filter = htole32(1),
7218 	};
7219 	int err;
7220 
7221 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7222 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7223 
7224 	if (err == 0)
7225 		sc->sc_bf.bf_enabled = 1;
7226 
7227 	return err;
7228 }
7229 
7230 int
7231 iwm_disable_beacon_filter(struct iwm_softc *sc)
7232 {
7233 	struct iwm_beacon_filter_cmd cmd;
7234 	int err;
7235 
7236 	memset(&cmd, 0, sizeof(cmd));
7237 
7238 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
7239 	if (err == 0)
7240 		sc->sc_bf.bf_enabled = 0;
7241 
7242 	return err;
7243 }
7244 
7245 int
7246 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
7247 {
7248 	struct iwm_add_sta_cmd add_sta_cmd;
7249 	int err;
7250 	uint32_t status, aggsize;
7251 	const uint32_t max_aggsize = (IWM_STA_FLG_MAX_AGG_SIZE_64K >>
7252 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT);
7253 	size_t cmdsize;
7254 	struct ieee80211com *ic = &sc->sc_ic;
7255 
7256 	if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
7257 		panic("STA already added");
7258 
7259 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
7260 
7261 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7262 		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7263 	else
7264 		add_sta_cmd.sta_id = IWM_STATION_ID;
7265 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
7266 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7267 			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
7268 		else
7269 			add_sta_cmd.station_type = IWM_STA_LINK;
7270 	}
7271 	add_sta_cmd.mac_id_n_color
7272 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
7273 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7274 		int qid;
7275 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
7276 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7277 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
7278 		else
7279 			qid = IWM_AUX_QUEUE;
7280 		in->tfd_queue_msk |= (1 << qid);
7281 	} else {
7282 		int ac;
7283 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
7284 			int qid = ac;
7285 			if (isset(sc->sc_enabled_capa,
7286 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7287 				qid += IWM_DQA_MIN_MGMT_QUEUE;
7288 			in->tfd_queue_msk |= (1 << qid);
7289 		}
7290 	}
7291 	if (!update) {
7292 		if (ic->ic_opmode == IEEE80211_M_MONITOR)
7293 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7294 			    etherbroadcastaddr);
7295 		else
7296 			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
7297 			    in->in_macaddr);
7298 	}
7299 	add_sta_cmd.add_modify = update ? 1 : 0;
7300 	add_sta_cmd.station_flags_msk
7301 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
7302 	if (update) {
7303 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
7304 		    IWM_STA_MODIFY_TID_DISABLE_TX);
7305 	}
7306 	add_sta_cmd.tid_disable_tx = htole16(in->tid_disable_ampdu);
7307 	add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);
7308 
7309 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
7310 		add_sta_cmd.station_flags_msk
7311 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
7312 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
7313 
7314 		if (iwm_mimo_enabled(sc)) {
7315 			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7316 				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
7317 				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
7318 				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
7319 				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
7320 					add_sta_cmd.station_flags |=
7321 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7322 				}
7323 			} else {
7324 				if (in->in_ni.ni_rxmcs[1] != 0) {
7325 					add_sta_cmd.station_flags |=
7326 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO2);
7327 				}
7328 				if (in->in_ni.ni_rxmcs[2] != 0) {
7329 					add_sta_cmd.station_flags |=
7330 					    htole32(IWM_STA_FLG_MIMO_EN_MIMO3);
7331 				}
7332 			}
7333 		}
7334 
7335 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
7336 		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
7337 			add_sta_cmd.station_flags |= htole32(
7338 			    IWM_STA_FLG_FAT_EN_40MHZ);
7339 		}
7340 
7341 		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
7342 			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
7343 			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
7344 				add_sta_cmd.station_flags |= htole32(
7345 				    IWM_STA_FLG_FAT_EN_80MHZ);
7346 			}
7347 			aggsize = (in->in_ni.ni_vhtcaps &
7348 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
7349 			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
7350 		} else {
7351 			aggsize = (in->in_ni.ni_ampdu_param &
7352 			    IEEE80211_AMPDU_PARAM_LE);
7353 		}
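		/* Clamp to the firmware's 64k maximum A-MPDU size setting. */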
7354 		if (aggsize > max_aggsize)
7355 			aggsize = max_aggsize;
7356 		add_sta_cmd.station_flags |= htole32((aggsize <<
7357 		    IWM_STA_FLG_MAX_AGG_SIZE_SHIFT) &
7358 		    IWM_STA_FLG_MAX_AGG_SIZE_MSK);
7359 
7360 		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
7361 		case IEEE80211_AMPDU_PARAM_SS_2:
7362 			add_sta_cmd.station_flags
7363 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
7364 			break;
7365 		case IEEE80211_AMPDU_PARAM_SS_4:
7366 			add_sta_cmd.station_flags
7367 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
7368 			break;
7369 		case IEEE80211_AMPDU_PARAM_SS_8:
7370 			add_sta_cmd.station_flags
7371 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
7372 			break;
7373 		case IEEE80211_AMPDU_PARAM_SS_16:
7374 			add_sta_cmd.station_flags
7375 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
7376 			break;
7377 		default:
7378 			break;
7379 		}
7380 	}
7381 
7382 	status = IWM_ADD_STA_SUCCESS;
7383 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7384 		cmdsize = sizeof(add_sta_cmd);
7385 	else
7386 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7387 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
7388 	    &add_sta_cmd, &status);
7389 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7390 		err = EIO;
7391 
7392 	return err;
7393 }
7394 
7395 int
7396 iwm_add_aux_sta(struct iwm_softc *sc)
7397 {
7398 	struct iwm_add_sta_cmd cmd;
7399 	int err, qid;
7400 	uint32_t status;
7401 	size_t cmdsize;
7402 
7403 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7404 		qid = IWM_DQA_AUX_QUEUE;
7405 		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
7406 		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
7407 	} else {
7408 		qid = IWM_AUX_QUEUE;
7409 		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
7410 	}
7411 	if (err)
7412 		return err;
7413 
7414 	memset(&cmd, 0, sizeof(cmd));
7415 	cmd.sta_id = IWM_AUX_STA_ID;
7416 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7417 		cmd.station_type = IWM_STA_AUX_ACTIVITY;
7418 	cmd.mac_id_n_color =
7419 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
7420 	cmd.tfd_queue_msk = htole32(1 << qid);
7421 	cmd.tid_disable_tx = htole16(0xffff);
7422 
7423 	status = IWM_ADD_STA_SUCCESS;
7424 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7425 		cmdsize = sizeof(cmd);
7426 	else
7427 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7428 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
7429 	    &status);
7430 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
7431 		err = EIO;
7432 
7433 	return err;
7434 }
7435 
7436 int
7437 iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *in, int drain)
7438 {
7439 	struct iwm_add_sta_cmd cmd;
7440 	int err;
7441 	uint32_t status;
7442 	size_t cmdsize;
7443 
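	/*
	 * Set or clear the drain-flow flag on our station entry so the
	 * firmware drains any frames still queued for this station.
	 */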
7444 	memset(&cmd, 0, sizeof(cmd));
7445 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
7446 	    in->in_color));
7447 	cmd.sta_id = IWM_STATION_ID;
7448 	cmd.add_modify = IWM_STA_MODE_MODIFY;
7449 	cmd.station_flags = drain ? htole32(IWM_STA_FLG_DRAIN_FLOW) : 0;
7450 	cmd.station_flags_msk = htole32(IWM_STA_FLG_DRAIN_FLOW);
7451 
7452 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7453 		cmdsize = sizeof(cmd);
7454 	else
7455 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
7456 
7457 	status = IWM_ADD_STA_SUCCESS;
7458 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
7459 	    cmdsize, &cmd, &status);
7460 	if (err) {
7461 		printf("%s: could not update sta (error %d)\n",
7462 		    DEVNAME(sc), err);
7463 		return err;
7464 	}
7465 
7466 	switch (status & IWM_ADD_STA_STATUS_MASK) {
7467 	case IWM_ADD_STA_SUCCESS:
7468 		break;
7469 	default:
7470 		err = EIO;
7471 		printf("%s: Couldn't %s draining for station\n",
7472 		    DEVNAME(sc), drain ? "enable" : "disable");
7473 		break;
7474 	}
7475 
7476 	return err;
7477 }
7478 
7479 int
7480 iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
7481 {
7482 	int err;
7483 
7484 	sc->sc_flags |= IWM_FLAG_TXFLUSH;
7485 
7486 	err = iwm_drain_sta(sc, in, 1);
7487 	if (err)
7488 		goto done;
7489 
7490 	err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
7491 	if (err) {
7492 		printf("%s: could not flush Tx path (error %d)\n",
7493 		    DEVNAME(sc), err);
7494 		goto done;
7495 	}
7496 
7497 	/*
7498 	 * Flushing Tx rings may fail if the AP has disappeared.
7499 	 * We can rely on iwm_newstate_task() to reset everything and begin
7500 	 * scanning again if we are left with outstanding frames on queues.
7501 	 */
7502 	err = iwm_wait_tx_queues_empty(sc);
7503 	if (err)
7504 		goto done;
7505 
7506 	err = iwm_drain_sta(sc, in, 0);
7507 done:
7508 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
7509 	return err;
7510 }
7511 
7512 int
7513 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
7514 {
7515 	struct ieee80211com *ic = &sc->sc_ic;
7516 	struct iwm_rm_sta_cmd rm_sta_cmd;
7517 	int err;
7518 
7519 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
7520 		panic("sta already removed");
7521 
7522 	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
7523 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7524 		rm_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
7525 	else
7526 		rm_sta_cmd.sta_id = IWM_STATION_ID;
7527 
7528 	err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
7529 	    &rm_sta_cmd);
7530 
7531 	return err;
7532 }
7533 
7534 uint16_t
7535 iwm_scan_rx_chain(struct iwm_softc *sc)
7536 {
7537 	uint16_t rx_chain;
7538 	uint8_t rx_ant;
7539 
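	/* Scan with all valid RX antennas, forced via the driver-force bit. */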
7540 	rx_ant = iwm_fw_valid_rx_ant(sc);
7541 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
7542 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
7543 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
7544 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
7545 	return htole16(rx_chain);
7546 }
7547 
7548 uint32_t
7549 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
7550 {
7551 	uint32_t tx_ant;
7552 	int i, ind;
7553 
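	/*
	 * Rotate through the valid TX antennas so that successive scans
	 * do not always transmit on the same antenna.
	 */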
7554 	for (i = 0, ind = sc->sc_scan_last_antenna;
7555 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
7556 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
7557 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
7558 			sc->sc_scan_last_antenna = ind;
7559 			break;
7560 		}
7561 	}
7562 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
7563 
7564 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
7565 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
7566 				   tx_ant);
7567 	else
7568 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
7569 }
7570 
7571 uint8_t
7572 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
7573     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
7574 {
7575 	struct ieee80211com *ic = &sc->sc_ic;
7576 	struct ieee80211_channel *c;
7577 	uint8_t nchan;
7578 
7579 	for (nchan = 0, c = &ic->ic_channels[1];
7580 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7581 	    nchan < sc->sc_capa_n_scan_channels;
7582 	    c++) {
7583 		if (c->ic_flags == 0)
7584 			continue;
7585 
7586 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
7587 		chan->iter_count = htole16(1);
7588 		chan->iter_interval = 0;
7589 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
7590 		if (n_ssids != 0 && !bgscan)
7591 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
7592 		chan++;
7593 		nchan++;
7594 	}
7595 
7596 	return nchan;
7597 }
7598 
7599 uint8_t
7600 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
7601     struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
7602 {
7603 	struct ieee80211com *ic = &sc->sc_ic;
7604 	struct ieee80211_channel *c;
7605 	uint8_t nchan;
7606 
7607 	for (nchan = 0, c = &ic->ic_channels[1];
7608 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7609 	    nchan < sc->sc_capa_n_scan_channels;
7610 	    c++) {
7611 		if (c->ic_flags == 0)
7612 			continue;
7613 
7614 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
7615 		chan->iter_count = 1;
7616 		chan->iter_interval = htole16(0);
7617 		if (n_ssids != 0 && !bgscan)
7618 			chan->flags = htole32(1 << 0); /* select SSID 0 */
7619 		chan++;
7620 		nchan++;
7621 	}
7622 
7623 	return nchan;
7624 }
7625 
7626 int
7627 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
7628 {
7629 	struct iwm_scan_probe_req preq2;
7630 	int err, i;
7631 
7632 	err = iwm_fill_probe_req(sc, &preq2);
7633 	if (err)
7634 		return err;
7635 
7636 	preq1->mac_header = preq2.mac_header;
7637 	for (i = 0; i < nitems(preq1->band_data); i++)
7638 		preq1->band_data[i] = preq2.band_data[i];
7639 	preq1->common_data = preq2.common_data;
7640 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
7641 	return 0;
7642 }
7643 
7644 int
7645 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
7646 {
7647 	struct ieee80211com *ic = &sc->sc_ic;
7648 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
7649 	struct ieee80211_rateset *rs;
7650 	size_t remain = sizeof(preq->buf);
7651 	uint8_t *frm, *pos;
7652 
7653 	memset(preq, 0, sizeof(*preq));
7654 
7655 	if (remain < sizeof(*wh) + 2)
7656 		return ENOBUFS;
7657 
7658 	/*
7659 	 * Build a probe request frame.  Most of the following code is a
7660 	 * copy & paste of what is done in net80211.
7661 	 */
7662 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
7663 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
7664 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
7665 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
7666 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
7667 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
7668 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
7669 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
7670 
7671 	frm = (uint8_t *)(wh + 1);
7672 
7673 	*frm++ = IEEE80211_ELEMID_SSID;
7674 	*frm++ = 0;
7675 	/* hardware inserts SSID */
7676 
7677 	/* Tell firmware where the MAC header and SSID IE are. */
7678 	preq->mac_header.offset = 0;
7679 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
7680 	remain -= frm - (uint8_t *)wh;
7681 
7682 	/* Fill in 2GHz IEs and tell firmware where they are. */
7683 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
7684 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7685 		if (remain < 4 + rs->rs_nrates)
7686 			return ENOBUFS;
7687 	} else if (remain < 2 + rs->rs_nrates)
7688 		return ENOBUFS;
7689 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
7690 	pos = frm;
7691 	frm = ieee80211_add_rates(frm, rs);
7692 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7693 		frm = ieee80211_add_xrates(frm, rs);
7694 	remain -= frm - pos;
7695 
7696 	if (isset(sc->sc_enabled_capa,
7697 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
7698 		if (remain < 3)
7699 			return ENOBUFS;
7700 		*frm++ = IEEE80211_ELEMID_DSPARMS;
7701 		*frm++ = 1;
7702 		*frm++ = 0;
7703 		remain -= 3;
7704 	}
7705 	preq->band_data[0].len = htole16(frm - pos);
7706 
7707 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7708 		/* Fill in 5GHz IEs. */
7709 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
7710 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
7711 			if (remain < 4 + rs->rs_nrates)
7712 				return ENOBUFS;
7713 		} else if (remain < 2 + rs->rs_nrates)
7714 			return ENOBUFS;
7715 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
7716 		pos = frm;
7717 		frm = ieee80211_add_rates(frm, rs);
7718 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
7719 			frm = ieee80211_add_xrates(frm, rs);
7720 		preq->band_data[1].len = htole16(frm - pos);
7721 		remain -= frm - pos;
7722 		if (ic->ic_flags & IEEE80211_F_VHTON) {
7723 			if (remain < 14)
7724 				return ENOBUFS;
7725 			frm = ieee80211_add_vhtcaps(frm, ic);
7726 			remain -= frm - pos;
7727 			preq->band_data[1].len = htole16(frm - pos);
7728 		}
7729 	}
7730 
7731 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
7732 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
7733 	pos = frm;
7734 	if (ic->ic_flags & IEEE80211_F_HTON) {
7735 		if (remain < 28)
7736 			return ENOBUFS;
7737 		frm = ieee80211_add_htcaps(frm, ic);
7738 		/* XXX add WME info? */
7739 		remain -= frm - pos;
7740 	}
7741 
7742 	preq->common_data.len = htole16(frm - pos);
7743 
7744 	return 0;
7745 }
7746 
7747 int
7748 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
7749 {
7750 	struct ieee80211com *ic = &sc->sc_ic;
7751 	struct iwm_host_cmd hcmd = {
7752 		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
7753 		.len = { 0, },
7754 		.data = { NULL, },
7755 		.flags = 0,
7756 	};
7757 	struct iwm_scan_req_lmac *req;
7758 	struct iwm_scan_probe_req_v1 *preq;
7759 	size_t req_len;
7760 	int err, async = bgscan;
7761 
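	/*
	 * Background scans are issued asynchronously: allocate without
	 * sleeping and send the command with IWM_CMD_ASYNC.
	 */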
7762 	req_len = sizeof(struct iwm_scan_req_lmac) +
7763 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7764 	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
7765 	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
7766 		return ENOMEM;
7767 	req = malloc(req_len, M_DEVBUF,
7768 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7769 	if (req == NULL)
7770 		return ENOMEM;
7771 
7772 	hcmd.len[0] = (uint16_t)req_len;
7773 	hcmd.data[0] = (void *)req;
7774 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
7775 
7776 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7777 	req->active_dwell = 10;
7778 	req->passive_dwell = 110;
7779 	req->fragmented_dwell = 44;
7780 	req->extended_dwell = 90;
7781 	if (bgscan) {
7782 		req->max_out_time = htole32(120);
7783 		req->suspend_time = htole32(120);
7784 	} else {
7785 		req->max_out_time = htole32(0);
7786 		req->suspend_time = htole32(0);
7787 	}
7788 	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
7789 	req->rx_chain_select = iwm_scan_rx_chain(sc);
7790 	req->iter_num = htole32(1);
7791 	req->delay = 0;
7792 
7793 	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
7794 	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
7795 	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
7796 	if (ic->ic_des_esslen == 0)
7797 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
7798 	else
7799 		req->scan_flags |=
7800 		    htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
7801 	if (isset(sc->sc_enabled_capa,
7802 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
7803 	    isset(sc->sc_enabled_capa,
7804 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
7805 		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
7806 
7807 	req->flags = htole32(IWM_PHY_BAND_24);
7808 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7809 		req->flags |= htole32(IWM_PHY_BAND_5);
7810 	req->filter_flags =
7811 	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
7812 
7813 	/* Tx flags 2 GHz. */
7814 	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7815 	    IWM_TX_CMD_FLG_BT_DIS);
7816 	req->tx_cmd[0].rate_n_flags =
7817 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
7818 	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
7819 
7820 	/* Tx flags 5 GHz. */
7821 	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
7822 	    IWM_TX_CMD_FLG_BT_DIS);
7823 	req->tx_cmd[1].rate_n_flags =
7824 	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
7825 	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
7826 
7827 	/* Check if we're doing an active directed scan. */
7828 	if (ic->ic_des_esslen != 0) {
7829 		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
7830 		req->direct_scan[0].len = ic->ic_des_esslen;
7831 		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
7832 		    ic->ic_des_esslen);
7833 	}
7834 
7835 	req->n_channels = iwm_lmac_scan_fill_channels(sc,
7836 	    (struct iwm_scan_channel_cfg_lmac *)req->data,
7837 	    ic->ic_des_esslen != 0, bgscan);
7838 
7839 	preq = (struct iwm_scan_probe_req_v1 *)(req->data +
7840 	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
7841 	    sc->sc_capa_n_scan_channels));
7842 	err = iwm_fill_probe_req_v1(sc, preq);
7843 	if (err) {
7844 		free(req, M_DEVBUF, req_len);
7845 		return err;
7846 	}
7847 
7848 	/* Specify the scan plan: We'll do one iteration. */
7849 	req->schedule[0].iterations = 1;
7850 	req->schedule[0].full_scan_mul = 1;
7851 
7852 	/* Disable EBS. */
7853 	req->channel_opt[0].non_ebs_ratio = 1;
7854 	req->channel_opt[1].non_ebs_ratio = 1;
7855 
7856 	err = iwm_send_cmd(sc, &hcmd);
7857 	free(req, M_DEVBUF, req_len);
7858 	return err;
7859 }
7860 
7861 int
7862 iwm_config_umac_scan(struct iwm_softc *sc)
7863 {
7864 	struct ieee80211com *ic = &sc->sc_ic;
7865 	struct iwm_scan_config *scan_config;
7866 	int err, nchan;
7867 	size_t cmd_size;
7868 	struct ieee80211_channel *c;
7869 	struct iwm_host_cmd hcmd = {
7870 		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
7871 		.flags = 0,
7872 	};
7873 	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
7874 	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
7875 	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
7876 	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
7877 	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
7878 	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
7879 	    IWM_SCAN_CONFIG_RATE_54M);
7880 
7881 	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
7882 
7883 	scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
7884 	if (scan_config == NULL)
7885 		return ENOMEM;
7886 
7887 	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
7888 	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
7889 	scan_config->legacy_rates = htole32(rates |
7890 	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
7891 
7892 	/* These timings correspond to iwlwifi's UNASSOC scan. */
7893 	scan_config->dwell_active = 10;
7894 	scan_config->dwell_passive = 110;
7895 	scan_config->dwell_fragmented = 44;
7896 	scan_config->dwell_extended = 90;
7897 	scan_config->out_of_channel_time = htole32(0);
7898 	scan_config->suspend_time = htole32(0);
7899 
7900 	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
7901 
7902 	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
7903 	scan_config->channel_flags = 0;
7904 
7905 	for (c = &ic->ic_channels[1], nchan = 0;
7906 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
7907 	    nchan < sc->sc_capa_n_scan_channels; c++) {
7908 		if (c->ic_flags == 0)
7909 			continue;
7910 		scan_config->channel_array[nchan++] =
7911 		    ieee80211_mhz2ieee(c->ic_freq, 0);
7912 	}
7913 
7914 	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
7915 	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
7916 	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
7917 	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
7918 	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
7919 	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
7920 	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
7921 	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
7922 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
7923 	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
7924 	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
7925 
7926 	hcmd.data[0] = scan_config;
7927 	hcmd.len[0] = cmd_size;
7928 
7929 	err = iwm_send_cmd(sc, &hcmd);
7930 	free(scan_config, M_DEVBUF, cmd_size);
7931 	return err;
7932 }
7933 
7934 int
7935 iwm_umac_scan_size(struct iwm_softc *sc)
7936 {
7937 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
7938 	int tail_size;
7939 
7940 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7941 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
7942 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7943 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
7944 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7945 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
7946 	else
7947 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
7948 
7949 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
7950 	    sc->sc_capa_n_scan_channels + tail_size;
7951 }
7952 
7953 struct iwm_scan_umac_chan_param *
7954 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
7955     struct iwm_scan_req_umac *req)
7956 {
7957 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7958 		return &req->v8.channel;
7959 
7960 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7961 		return &req->v7.channel;
7962 
7963 	return &req->v1.channel;
7964 }
7965 
7966 void *
7967 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
7968 {
7969 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7970 		return (void *)&req->v8.data;
7971 
7972 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7973 		return (void *)&req->v7.data;
7974 
7975 	return (void *)&req->v1.data;
7976 
7977 }
7978 
7979 /* adaptive dwell max budget time [TU] for full scan */
7980 #define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7981 /* adaptive dwell max budget time [TU] for directed scan */
7982 #define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7983 /* adaptive dwell default high band APs number */
7984 #define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7985 /* adaptive dwell default low band APs number */
7986 #define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7987 /* adaptive dwell default APs number in social channels (1, 6, 11) */
7988 #define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7989 
7990 int
7991 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
7992 {
7993 	struct ieee80211com *ic = &sc->sc_ic;
7994 	struct iwm_host_cmd hcmd = {
7995 		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
7996 		.len = { 0, },
7997 		.data = { NULL, },
7998 		.flags = 0,
7999 	};
8000 	struct iwm_scan_req_umac *req;
8001 	void *cmd_data, *tail_data;
8002 	struct iwm_scan_req_umac_tail_v2 *tail;
8003 	struct iwm_scan_req_umac_tail_v1 *tailv1;
8004 	struct iwm_scan_umac_chan_param *chanparam;
8005 	size_t req_len;
8006 	int err, async = bgscan;
8007 
8008 	req_len = iwm_umac_scan_size(sc);
8009 	if ((req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 +
8010 	    sizeof(struct iwm_scan_req_umac_tail_v1)) ||
8011 	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
8012 		return ERANGE;
8013 	req = malloc(req_len, M_DEVBUF,
8014 	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
8015 	if (req == NULL)
8016 		return ENOMEM;
8017 
8018 	hcmd.len[0] = (uint16_t)req_len;
8019 	hcmd.data[0] = (void *)req;
8020 	hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
8021 
8022 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8023 		req->v7.adwell_default_n_aps_social =
8024 			IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
8025 		req->v7.adwell_default_n_aps =
8026 			IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;
8027 
8028 		if (ic->ic_des_esslen != 0)
8029 			req->v7.adwell_max_budget =
8030 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
8031 		else
8032 			req->v7.adwell_max_budget =
8033 			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
8034 
8035 		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8036 		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
8037 		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;
8038 
8039 		if (isset(sc->sc_ucode_api,
8040 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8041 			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
8042 			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
8043 		} else {
8044 			req->v7.active_dwell = 10;
8045 			req->v7.passive_dwell = 110;
8046 			req->v7.fragmented_dwell = 44;
8047 		}
8048 	} else {
8049 		/* These timings correspond to iwlwifi's UNASSOC scan. */
8050 		req->v1.active_dwell = 10;
8051 		req->v1.passive_dwell = 110;
8052 		req->v1.fragmented_dwell = 44;
8053 		req->v1.extended_dwell = 90;
8054 
8055 		req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8056 	}
8057 
8058 	if (bgscan) {
8059 		const uint32_t timeout = htole32(120);
8060 		if (isset(sc->sc_ucode_api,
8061 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8062 			req->v8.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8063 			req->v8.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8064 		} else if (isset(sc->sc_ucode_api,
8065 		    IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8066 			req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8067 			req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = timeout;
8068 		} else {
8069 			req->v1.max_out_time = timeout;
8070 			req->v1.suspend_time = timeout;
8071 		}
8072 	}
8073 
8074 	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
8075 
8076 	cmd_data = iwm_get_scan_req_umac_data(sc, req);
8077 	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
8078 	chanparam->count = iwm_umac_scan_fill_channels(sc,
8079 	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
8080 	    ic->ic_des_esslen != 0, bgscan);
8081 	chanparam->flags = 0;
8082 
8083 	tail_data = cmd_data + sizeof(struct iwm_scan_channel_cfg_umac) *
8084 	    sc->sc_capa_n_scan_channels;
8085 	tail = tail_data;
8086 	/* tail v1 layout differs in preq and direct_scan member fields. */
8087 	tailv1 = tail_data;
8088 
8089 	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
8090 	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE);
8091 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8092 		req->v8.general_flags2 =
8093 			IWM_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
8094 	}
8095 
8096 	if (ic->ic_des_esslen != 0) {
8097 		if (isset(sc->sc_ucode_api,
8098 		    IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
8099 			tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8100 			tail->direct_scan[0].len = ic->ic_des_esslen;
8101 			memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
8102 			    ic->ic_des_esslen);
8103 		} else {
8104 			tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
8105 			tailv1->direct_scan[0].len = ic->ic_des_esslen;
8106 			memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
8107 			    ic->ic_des_esslen);
8108 		}
8109 		req->general_flags |=
8110 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
8111 	} else
8112 		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
8113 
8114 	if (isset(sc->sc_enabled_capa,
8115 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) &&
8116 	    isset(sc->sc_enabled_capa,
8117 	    IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
8118 		req->general_flags |=
8119 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
8120 
8121 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8122 		req->general_flags |=
8123 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL);
8124 	} else {
8125 		req->general_flags |=
8126 		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
8127 	}
8128 
8129 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
8130 		err = iwm_fill_probe_req(sc, &tail->preq);
8131 	else
8132 		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
8133 	if (err) {
8134 		free(req, M_DEVBUF, req_len);
8135 		return err;
8136 	}
8137 
8138 	/* Specify the scan plan: We'll do one iteration. */
8139 	tail->schedule[0].interval = 0;
8140 	tail->schedule[0].iter_count = 1;
8141 
8142 	err = iwm_send_cmd(sc, &hcmd);
8143 	free(req, M_DEVBUF, req_len);
8144 	return err;
8145 }
8146 
8147 void
8148 iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
8149 {
8150 	struct ieee80211com *ic = &sc->sc_ic;
8151 	struct ifnet *ifp = IC2IFP(ic);
8152 	char alpha2[3];
8153 
8154 	snprintf(alpha2, sizeof(alpha2), "%c%c",
8155 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
8156 
8157 	if (ifp->if_flags & IFF_DEBUG) {
8158 		printf("%s: firmware has detected regulatory domain '%s' "
8159 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
8160 	}
8161 
8162 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
8163 }
8164 
8165 uint8_t
8166 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
8167 {
8168 	int i;
8169 	uint8_t rval;
8170 
8171 	for (i = 0; i < rs->rs_nrates; i++) {
8172 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
8173 		if (rval == iwm_rates[ridx].rate)
8174 			return rs->rs_rates[i];
8175 	}
8176 
8177 	return 0;
8178 }
8179 
8180 int
8181 iwm_rval2ridx(int rval)
8182 {
8183 	int ridx;
8184 
8185 	for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
8186 		if (iwm_rates[ridx].plcp == IWM_RATE_INVM_PLCP)
8187 			continue;
8188 		if (rval == iwm_rates[ridx].rate)
8189 			break;
8190 	}
8191 
8192 	return ridx;
8193 }
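/*
 * Both helpers above compare rates in net80211's 500 kbit/s units:
 * e.g. iwm_rval2ridx(12) yields the iwm_rates[] index of the 6 Mbps
 * entry.
 */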
8194 
8195 void
8196 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
8197     int *ofdm_rates)
8198 {
8199 	struct ieee80211_node *ni = &in->in_ni;
8200 	struct ieee80211_rateset *rs = &ni->ni_rates;
8201 	int lowest_present_ofdm = -1;
8202 	int lowest_present_cck = -1;
8203 	uint8_t cck = 0;
8204 	uint8_t ofdm = 0;
8205 	int i;
8206 
8207 	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
8208 	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
8209 		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
8210 			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8211 				continue;
8212 			cck |= (1 << i);
8213 			if (lowest_present_cck == -1 || lowest_present_cck > i)
8214 				lowest_present_cck = i;
8215 		}
8216 	}
8217 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
8218 		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
8219 			continue;
8220 		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
8221 		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
8222 			lowest_present_ofdm = i;
8223 	}
8224 
8225 	/*
8226 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
8227 	 * variables. This isn't sufficient though, as there might not
8228 	 * be all the right rates in the bitmap. E.g. if the only basic
8229 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
8230 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
8231 	 *
8232 	 *    [...] a STA responding to a received frame shall transmit
8233 	 *    its Control Response frame [...] at the highest rate in the
8234 	 *    BSSBasicRateSet parameter that is less than or equal to the
8235 	 *    rate of the immediately previous frame in the frame exchange
8236 	 *    sequence ([...]) and that is of the same modulation class
8237 	 *    ([...]) as the received frame. If no rate contained in the
8238 	 *    BSSBasicRateSet parameter meets these conditions, then the
8239 	 *    control frame sent in response to a received frame shall be
8240 	 *    transmitted at the highest mandatory rate of the PHY that is
8241 	 *    less than or equal to the rate of the received frame, and
8242 	 *    that is of the same modulation class as the received frame.
8243 	 *
8244 	 * As a consequence, we need to add all mandatory rates that are
8245 	 * lower than all of the basic rates to these bitmaps.
8246 	 */
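	/*
	 * Worked example: if the only basic OFDM rate is 24 Mbps, the
	 * two checks below add the mandatory 12 Mbps and 6 Mbps rates,
	 * both of which lie below the lowest basic rate.
	 */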
8247 
8248 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
8249 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
8250 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
8251 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
8252 	/* 6M already there or needed so always add */
8253 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
8254 
8255 	/*
8256 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
8257 	 * Note, however:
8258 	 *  - if no CCK rates are basic, it must be ERP since there must
8259 	 *    be some basic rates at all, so they're OFDM => ERP PHY
8260 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
8261 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
8262 	 *  - if 5.5M is basic, 1M and 2M are mandatory
8263 	 *  - if 2M is basic, 1M is mandatory
8264 	 *  - if 1M is basic, that's the only valid ACK rate.
8265 	 * As a consequence, it's not as complicated as it sounds, just add
8266 	 * any lower rates to the ACK rate bitmap.
8267 	 */
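	/*
	 * Worked example: a basic rate set of { 11 } results in a cck
	 * bitmap covering 1, 2, 5.5 and 11 Mbps.
	 */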
8268 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
8269 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
8270 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
8271 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
8272 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
8273 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
8274 	/* 1M already there or needed so always add */
8275 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
8276 
8277 	*cck_rates = cck;
8278 	*ofdm_rates = ofdm;
8279 }
8280 
8281 void
8282 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
8283     struct iwm_mac_ctx_cmd *cmd, uint32_t action)
8284 {
8285 #define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
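	/* e.g. an ECWmin value of 4 yields IWM_EXP2(4) == 15, i.e. CWmin = 15 */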
8286 	struct ieee80211com *ic = &sc->sc_ic;
8287 	struct ieee80211_node *ni = ic->ic_bss;
8288 	int cck_ack_rates, ofdm_ack_rates;
8289 	int i;
8290 
8291 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
8292 	    in->in_color));
8293 	cmd->action = htole32(action);
8294 
8295 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8296 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
8297 	else if (ic->ic_opmode == IEEE80211_M_STA)
8298 		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
8299 	else
8300 		panic("unsupported operating mode %d", ic->ic_opmode);
8301 	cmd->tsf_id = htole32(IWM_TSF_ID_A);
8302 
8303 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
8304 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8305 		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
8306 		return;
8307 	}
8308 
8309 	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
8310 	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
8311 	cmd->cck_rates = htole32(cck_ack_rates);
8312 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
8313 
8314 	cmd->cck_short_preamble
8315 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
8316 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
8317 	cmd->short_slot
8318 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
8319 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
8320 
8321 	for (i = 0; i < EDCA_NUM_AC; i++) {
8322 		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
8323 		int txf = iwm_ac_to_tx_fifo[i];
8324 
8325 		cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
8326 		cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
8327 		cmd->ac[txf].aifsn = ac->ac_aifsn;
8328 		cmd->ac[txf].fifos_mask = (1 << txf);
8329 		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
8330 	}
8331 	if (ni->ni_flags & IEEE80211_NODE_QOS)
8332 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
8333 
8334 	if (ni->ni_flags & IEEE80211_NODE_HT) {
8335 		enum ieee80211_htprot htprot =
8336 		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
8337 		switch (htprot) {
8338 		case IEEE80211_HTPROT_NONE:
8339 			break;
8340 		case IEEE80211_HTPROT_NONMEMBER:
8341 		case IEEE80211_HTPROT_NONHT_MIXED:
8342 			cmd->protection_flags |=
8343 			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8344 			    IWM_MAC_PROT_FLG_FAT_PROT);
8345 			break;
8346 		case IEEE80211_HTPROT_20MHZ:
8347 			if (in->in_phyctxt &&
8348 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
8349 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
8350 				cmd->protection_flags |=
8351 				    htole32(IWM_MAC_PROT_FLG_HT_PROT |
8352 				    IWM_MAC_PROT_FLG_FAT_PROT);
8353 			}
8354 			break;
8355 		default:
8356 			break;
8357 		}
8358 
8359 		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
8360 	}
8361 	if (ic->ic_flags & IEEE80211_F_USEPROT)
8362 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
8363 
8364 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
8365 #undef IWM_EXP2
8366 }
8367 
8368 void
8369 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
8370     struct iwm_mac_data_sta *sta, int assoc)
8371 {
8372 	struct ieee80211_node *ni = &in->in_ni;
8373 	uint32_t dtim_off;
8374 	uint64_t tsf;
8375 
8376 	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
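	/*
	 * dtim_off is in microseconds: e.g. a DTIM count of 2 with a
	 * 100 TU beacon interval yields 200 TU = 204800 usec, since
	 * IEEE80211_DUR_TU is 1024 usec.
	 */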
8377 	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
8378 	tsf = letoh64(tsf);
8379 
8380 	sta->is_assoc = htole32(assoc);
8381 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
8382 	sta->dtim_tsf = htole64(tsf + dtim_off);
8383 	sta->bi = htole32(ni->ni_intval);
8384 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
8385 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
8386 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
8387 	sta->listen_interval = htole32(10);
8388 	sta->assoc_id = htole32(ni->ni_associd);
8389 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
8390 }
8391 
8392 int
8393 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
8394     int assoc)
8395 {
8396 	struct ieee80211com *ic = &sc->sc_ic;
8397 	struct ieee80211_node *ni = &in->in_ni;
8398 	struct iwm_mac_ctx_cmd cmd;
8399 	int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
8400 
8401 	if (action == IWM_FW_CTXT_ACTION_ADD && active)
8402 		panic("MAC already added");
8403 	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
8404 		panic("MAC already removed");
8405 
8406 	memset(&cmd, 0, sizeof(cmd));
8407 
8408 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
8409 
8410 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8411 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
8412 		    IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
8413 		    IWM_MAC_FILTER_ACCEPT_GRP |
8414 		    IWM_MAC_FILTER_IN_BEACON |
8415 		    IWM_MAC_FILTER_IN_PROBE_REQUEST |
8416 		    IWM_MAC_FILTER_IN_CRC32);
8417 	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
8418 		/*
8419 		 * Allow beacons to pass through as long as we are not
8420 		 * associated or we do not have dtim period information.
8421 		 */
8422 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
8423 	else
8424 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
8425 
8426 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
8427 }
8428 
8429 int
8430 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
8431 {
8432 	struct iwm_time_quota_cmd_v1 cmd;
8433 	int i, idx, num_active_macs, quota, quota_rem;
8434 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
8435 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
8436 	uint16_t id;
8437 
8438 	memset(&cmd, 0, sizeof(cmd));
8439 
8440 	/* currently, PHY ID == binding ID */
8441 	if (in && in->in_phyctxt) {
8442 		id = in->in_phyctxt->id;
8443 		KASSERT(id < IWM_MAX_BINDINGS);
8444 		colors[id] = in->in_phyctxt->color;
8445 		if (running)
8446 			n_ifs[id] = 1;
8447 	}
8448 
8449 	/*
8450 	 * The FW's scheduling session consists of
8451 	 * IWM_MAX_QUOTA fragments. Divide these fragments
8452 	 * equally among all the bindings that require quota.
8453 	 */
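	/*
	 * Example (assuming IWM_MAX_QUOTA is 128, as defined in
	 * if_iwmreg.h): a single active MAC receives all 128 fragments
	 * with remainder 0; two active MACs would receive 64 each.
	 */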
8454 	num_active_macs = 0;
8455 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8456 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
8457 		num_active_macs += n_ifs[i];
8458 	}
8459 
8460 	quota = 0;
8461 	quota_rem = 0;
8462 	if (num_active_macs) {
8463 		quota = IWM_MAX_QUOTA / num_active_macs;
8464 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
8465 	}
8466 
8467 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
8468 		if (colors[i] < 0)
8469 			continue;
8470 
8471 		cmd.quotas[idx].id_and_color =
8472 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
8473 
8474 		if (n_ifs[i] <= 0) {
8475 			cmd.quotas[idx].quota = htole32(0);
8476 			cmd.quotas[idx].max_duration = htole32(0);
8477 		} else {
8478 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
8479 			cmd.quotas[idx].max_duration = htole32(0);
8480 		}
8481 		idx++;
8482 	}
8483 
8484 	/* Give the remainder of the session to the first binding */
8485 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
8486 
8487 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
8488 		struct iwm_time_quota_cmd cmd_v2;
8489 
8490 		memset(&cmd_v2, 0, sizeof(cmd_v2));
8491 		for (i = 0; i < IWM_MAX_BINDINGS; i++) {
8492 			cmd_v2.quotas[i].id_and_color =
8493 			    cmd.quotas[i].id_and_color;
8494 			cmd_v2.quotas[i].quota = cmd.quotas[i].quota;
8495 			cmd_v2.quotas[i].max_duration =
8496 			    cmd.quotas[i].max_duration;
8497 		}
8498 		return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
8499 		    sizeof(cmd_v2), &cmd_v2);
8500 	}
8501 
8502 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
8503 }
8504 
8505 void
8506 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8507 {
8508 	int s = splnet();
8509 
8510 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8511 		splx(s);
8512 		return;
8513 	}
8514 
8515 	refcnt_take(&sc->task_refs);
8516 	if (!task_add(taskq, task))
8517 		refcnt_rele_wake(&sc->task_refs);
8518 	splx(s);
8519 }
8520 
8521 void
8522 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8523 {
8524 	if (task_del(taskq, task))
8525 		refcnt_rele(&sc->task_refs);
8526 }
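/*
 * Reference-counting invariant for the two helpers above: a reference
 * is taken for every task successfully queued, and dropped again if the
 * task was already pending or is cancelled; a queued task drops its own
 * reference via refcnt_rele_wake() when it completes.
 */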
8527 
8528 int
8529 iwm_scan(struct iwm_softc *sc)
8530 {
8531 	struct ieee80211com *ic = &sc->sc_ic;
8532 	struct ifnet *ifp = IC2IFP(ic);
8533 	int err;
8534 
8535 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
8536 		err = iwm_scan_abort(sc);
8537 		if (err) {
8538 			printf("%s: could not abort background scan\n",
8539 			    DEVNAME(sc));
8540 			return err;
8541 		}
8542 	}
8543 
8544 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8545 		err = iwm_umac_scan(sc, 0);
8546 	else
8547 		err = iwm_lmac_scan(sc, 0);
8548 	if (err) {
8549 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8550 		return err;
8551 	}
8552 
8553 	/*
8554 	 * The current mode might have been fixed during association.
8555 	 * Ensure all channels get scanned.
8556 	 */
8557 	if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
8558 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
8559 
8560 	sc->sc_flags |= IWM_FLAG_SCANNING;
8561 	if (ifp->if_flags & IFF_DEBUG)
8562 		printf("%s: %s -> %s\n", ifp->if_xname,
8563 		    ieee80211_state_name[ic->ic_state],
8564 		    ieee80211_state_name[IEEE80211_S_SCAN]);
8565 	if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
8566 		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
8567 		ieee80211_node_cleanup(ic, ic->ic_bss);
8568 	}
8569 	ic->ic_state = IEEE80211_S_SCAN;
8570 	iwm_led_blink_start(sc);
8571 	wakeup(&ic->ic_state); /* wake iwm_init() */
8572 
8573 	return 0;
8574 }
8575 
8576 int
8577 iwm_bgscan(struct ieee80211com *ic)
8578 {
8579 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8580 	int err;
8581 
8582 	if (sc->sc_flags & IWM_FLAG_SCANNING)
8583 		return 0;
8584 
8585 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8586 		err = iwm_umac_scan(sc, 1);
8587 	else
8588 		err = iwm_lmac_scan(sc, 1);
8589 	if (err) {
8590 		printf("%s: could not initiate scan\n", DEVNAME(sc));
8591 		return err;
8592 	}
8593 
8594 	sc->sc_flags |= IWM_FLAG_BGSCAN;
8595 	return 0;
8596 }
8597 
8598 void
8599 iwm_bgscan_done(struct ieee80211com *ic,
8600     struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
8601 {
8602 	struct iwm_softc *sc = ic->ic_softc;
8603 
8604 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8605 	sc->bgscan_unref_arg = arg;
8606 	sc->bgscan_unref_arg_size = arg_size;
8607 	iwm_add_task(sc, systq, &sc->bgscan_done_task);
8608 }
8609 
8610 void
8611 iwm_bgscan_done_task(void *arg)
8612 {
8613 	struct iwm_softc *sc = arg;
8614 	struct ieee80211com *ic = &sc->sc_ic;
8615 	struct iwm_node *in = (void *)ic->ic_bss;
8616 	struct ieee80211_node *ni = &in->in_ni;
8617 	int tid, err = 0, s = splnet();
8618 
8619 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
8620 	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
8621 	    ic->ic_state != IEEE80211_S_RUN) {
8622 		err = ENXIO;
8623 		goto done;
8624 	}
8625 
8626 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
8627 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
8628 
8629 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8630 			continue;
8631 
8632 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8633 		if (err)
8634 			goto done;
8635 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
8636 		if (err)
8637 			goto done;
8638 		in->tfd_queue_msk &= ~(1 << qid);
8639 #if 0 /* disabled for now; we are going to DEAUTH soon anyway */
8640 		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
8641 		    IEEE80211_ACTION_DELBA,
8642 		    IEEE80211_REASON_AUTH_LEAVE << 16 |
8643 		    IEEE80211_FC1_DIR_TODS << 8 | tid);
8644 #endif
8645 		ieee80211_node_tx_ba_clear(ni, tid);
8646 	}
8647 
8648 	err = iwm_flush_sta(sc, in);
8649 	if (err)
8650 		goto done;
8651 
8652 	/*
8653 	 * Tx queues have been flushed and Tx agg has been stopped.
8654 	 * Allow roaming to proceed.
8655 	 */
8656 	ni->ni_unref_arg = sc->bgscan_unref_arg;
8657 	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
8658 	sc->bgscan_unref_arg = NULL;
8659 	sc->bgscan_unref_arg_size = 0;
8660 	ieee80211_node_tx_stopped(ic, &in->in_ni);
8661 done:
8662 	if (err) {
8663 		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8664 		sc->bgscan_unref_arg = NULL;
8665 		sc->bgscan_unref_arg_size = 0;
8666 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8667 			task_add(systq, &sc->init_task);
8668 	}
8669 	refcnt_rele_wake(&sc->task_refs);
8670 	splx(s);
8671 }
8672 
8673 int
8674 iwm_umac_scan_abort(struct iwm_softc *sc)
8675 {
8676 	struct iwm_umac_scan_abort cmd = { 0 };
8677 
8678 	return iwm_send_cmd_pdu(sc,
8679 	    IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
8680 	    0, sizeof(cmd), &cmd);
8681 }
8682 
8683 int
8684 iwm_lmac_scan_abort(struct iwm_softc *sc)
8685 {
8686 	struct iwm_host_cmd cmd = {
8687 		.id = IWM_SCAN_OFFLOAD_ABORT_CMD,
8688 	};
8689 	int err, status;
8690 
8691 	err = iwm_send_cmd_status(sc, &cmd, &status);
8692 	if (err)
8693 		return err;
8694 
8695 	if (status != IWM_CAN_ABORT_STATUS) {
8696 		/*
8697 		 * The scan abort will return 1 for success or
8698 		 * 2 for "failure".  A failure condition can be
8699 		 * due to simply not being in an active scan which
8700 		 * can occur if we send the scan abort before the
8701 		 * microcode has notified us that a scan is completed.
8702 		 */
8703 		return EBUSY;
8704 	}
8705 
8706 	return 0;
8707 }
8708 
8709 int
8710 iwm_scan_abort(struct iwm_softc *sc)
8711 {
8712 	int err;
8713 
8714 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8715 		err = iwm_umac_scan_abort(sc);
8716 	else
8717 		err = iwm_lmac_scan_abort(sc);
8718 
8719 	if (err == 0)
8720 		sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8721 	return err;
8722 }
8723 
8724 int
8725 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8726     struct ieee80211_channel *chan, uint8_t chains_static,
8727     uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8728     uint8_t vht_chan_width)
8729 {
8730 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8731 	int err;
8732 
8733 	if (isset(sc->sc_enabled_capa,
8734 	    IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8735 	    (phyctxt->channel->ic_flags & band_flags) !=
8736 	    (chan->ic_flags & band_flags)) {
8737 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8738 		    chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8739 		    vht_chan_width);
8740 		if (err) {
8741 			printf("%s: could not remove PHY context "
8742 			    "(error %d)\n", DEVNAME(sc), err);
8743 			return err;
8744 		}
8745 		phyctxt->channel = chan;
8746 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8747 		    chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time, sco,
8748 		    vht_chan_width);
8749 		if (err) {
8750 			printf("%s: could not add PHY context "
8751 			    "(error %d)\n", DEVNAME(sc), err);
8752 			return err;
8753 		}
8754 	} else {
8755 		phyctxt->channel = chan;
8756 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8757 		    chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8758 		    vht_chan_width);
8759 		if (err) {
8760 			printf("%s: could not update PHY context (error %d)\n",
8761 			    DEVNAME(sc), err);
8762 			return err;
8763 		}
8764 	}
8765 
8766 	phyctxt->sco = sco;
8767 	phyctxt->vht_chan_width = vht_chan_width;
8768 	return 0;
8769 }
8770 
8771 int
8772 iwm_auth(struct iwm_softc *sc)
8773 {
8774 	struct ieee80211com *ic = &sc->sc_ic;
8775 	struct iwm_node *in = (void *)ic->ic_bss;
8776 	uint32_t duration;
8777 	int generation = sc->sc_generation, err;
8778 
8779 	splassert(IPL_NET);
8780 
8781 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8782 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8783 		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8784 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8785 		if (err)
8786 			return err;
8787 	} else {
8788 		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8789 		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8790 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8791 		if (err)
8792 			return err;
8793 	}
8794 	in->in_phyctxt = &sc->sc_phyctxt[0];
8795 	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8796 	iwm_setrates(in, 0);
8797 
8798 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8799 	if (err) {
8800 		printf("%s: could not add MAC context (error %d)\n",
8801 		    DEVNAME(sc), err);
8802 		return err;
8803 	}
8804 	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8805 
8806 	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8807 	if (err) {
8808 		printf("%s: could not add binding (error %d)\n",
8809 		    DEVNAME(sc), err);
8810 		goto rm_mac_ctxt;
8811 	}
8812 	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8813 
8814 	in->tid_disable_ampdu = 0xffff;
8815 	err = iwm_add_sta_cmd(sc, in, 0);
8816 	if (err) {
8817 		printf("%s: could not add sta (error %d)\n",
8818 		    DEVNAME(sc), err);
8819 		goto rm_binding;
8820 	}
8821 	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8822 
8823 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8824 		return 0;
8825 
8826 	/*
8827 	 * Prevent the FW from wandering off channel during association
8828 	 * by "protecting" the session with a time event.
8829 	 */
8830 	if (in->in_ni.ni_intval)
8831 		duration = in->in_ni.ni_intval * 2;
8832 	else
8833 		duration = IEEE80211_DUR_TU;
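	/*
	 * e.g. with a typical beacon interval of 100 TU the session
	 * below is protected for 200 TU.
	 */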
8834 	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
8835 
8836 	return 0;
8837 
8838 rm_binding:
8839 	if (generation == sc->sc_generation) {
8840 		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8841 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8842 	}
8843 rm_mac_ctxt:
8844 	if (generation == sc->sc_generation) {
8845 		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8846 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8847 	}
8848 	return err;
8849 }
8850 
8851 int
8852 iwm_deauth(struct iwm_softc *sc)
8853 {
8854 	struct ieee80211com *ic = &sc->sc_ic;
8855 	struct iwm_node *in = (void *)ic->ic_bss;
8856 	int err;
8857 
8858 	splassert(IPL_NET);
8859 
8860 	iwm_unprotect_session(sc, in);
8861 
8862 	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8863 		err = iwm_flush_sta(sc, in);
8864 		if (err)
8865 			return err;
8866 		err = iwm_rm_sta_cmd(sc, in);
8867 		if (err) {
8868 			printf("%s: could not remove STA (error %d)\n",
8869 			    DEVNAME(sc), err);
8870 			return err;
8871 		}
8872 		in->tid_disable_ampdu = 0xffff;
8873 		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8874 		sc->sc_rx_ba_sessions = 0;
8875 		sc->ba_rx.start_tidmask = 0;
8876 		sc->ba_rx.stop_tidmask = 0;
8877 		sc->tx_ba_queue_mask = 0;
8878 		sc->ba_tx.start_tidmask = 0;
8879 		sc->ba_tx.stop_tidmask = 0;
8880 	}
8881 
8882 	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8883 		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8884 		if (err) {
8885 			printf("%s: could not remove binding (error %d)\n",
8886 			    DEVNAME(sc), err);
8887 			return err;
8888 		}
8889 		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8890 	}
8891 
8892 	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8893 		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8894 		if (err) {
8895 			printf("%s: could not remove MAC context (error %d)\n",
8896 			    DEVNAME(sc), err);
8897 			return err;
8898 		}
8899 		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8900 	}
8901 
8902 	/* Move unused PHY context to a default channel. */
8903 	err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8904 	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8905 	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8906 	if (err)
8907 		return err;
8908 
8909 	return 0;
8910 }
8911 
8912 int
8913 iwm_run(struct iwm_softc *sc)
8914 {
8915 	struct ieee80211com *ic = &sc->sc_ic;
8916 	struct iwm_node *in = (void *)ic->ic_bss;
8917 	struct ieee80211_node *ni = &in->in_ni;
8918 	int err;
8919 
8920 	splassert(IPL_NET);
8921 
8922 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8923 		/* Add a MAC context and a sniffing STA. */
8924 		err = iwm_auth(sc);
8925 		if (err)
8926 			return err;
8927 	}
8928 
8929 	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
8930 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8931 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8932 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8933 		    in->in_phyctxt->channel, chains, chains,
8934 		    0, IEEE80211_HTOP0_SCO_SCN,
8935 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8936 		if (err) {
8937 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8938 			return err;
8939 		}
8940 	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8941 		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8942 		uint8_t sco, vht_chan_width;
8943 		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8944 		    ieee80211_node_supports_ht_chan40(ni))
8945 			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8946 		else
8947 			sco = IEEE80211_HTOP0_SCO_SCN;
8948 		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8949 		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8950 		    ieee80211_node_supports_vht_chan80(ni))
8951 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8952 		else
8953 			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8954 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8955 		    in->in_phyctxt->channel, chains, chains,
8956 		    0, sco, vht_chan_width);
8957 		if (err) {
8958 			printf("%s: failed to update PHY\n", DEVNAME(sc));
8959 			return err;
8960 		}
8961 	}
8962 
8963 	/* Update STA again to apply HT and VHT settings. */
8964 	err = iwm_add_sta_cmd(sc, in, 1);
8965 	if (err) {
8966 		printf("%s: could not update STA (error %d)\n",
8967 		    DEVNAME(sc), err);
8968 		return err;
8969 	}
8970 
8971 	/* We have now been assigned an associd by the AP. */
8972 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8973 	if (err) {
8974 		printf("%s: failed to update MAC\n", DEVNAME(sc));
8975 		return err;
8976 	}
8977 
8978 	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8979 	if (err) {
8980 		printf("%s: could not set sf full on (error %d)\n",
8981 		    DEVNAME(sc), err);
8982 		return err;
8983 	}
8984 
8985 	err = iwm_allow_mcast(sc);
8986 	if (err) {
8987 		printf("%s: could not allow mcast (error %d)\n",
8988 		    DEVNAME(sc), err);
8989 		return err;
8990 	}
8991 
8992 	err = iwm_power_update_device(sc);
8993 	if (err) {
8994 		printf("%s: could not send power command (error %d)\n",
8995 		    DEVNAME(sc), err);
8996 		return err;
8997 	}
8998 #ifdef notyet
8999 	/*
9000 	 * Disabled for now. Default beacon filter settings
9001 	 * prevent net80211 from getting ERP and HT protection
9002 	 * updates from beacons.
9003 	 */
9004 	err = iwm_enable_beacon_filter(sc, in);
9005 	if (err) {
9006 		printf("%s: could not enable beacon filter\n",
9007 		    DEVNAME(sc));
9008 		return err;
9009 	}
9010 #endif
9011 	err = iwm_power_mac_update_mode(sc, in);
9012 	if (err) {
9013 		printf("%s: could not update MAC power (error %d)\n",
9014 		    DEVNAME(sc), err);
9015 		return err;
9016 	}
9017 
9018 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9019 		err = iwm_update_quotas(sc, in, 1);
9020 		if (err) {
9021 			printf("%s: could not update quotas (error %d)\n",
9022 			    DEVNAME(sc), err);
9023 			return err;
9024 		}
9025 	}
9026 
9027 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
9028 	ieee80211_ra_node_init(&in->in_rn);
9029 	ieee80211_ra_vht_node_init(&in->in_rn_vht);
9030 
9031 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9032 		iwm_led_blink_start(sc);
9033 		return 0;
9034 	}
9035 
9036 	/* Start at lowest available bit-rate, AMRR will raise. */
9037 	in->in_ni.ni_txrate = 0;
9038 	in->in_ni.ni_txmcs = 0;
9039 	in->in_ni.ni_vht_ss = 1;
9040 	iwm_setrates(in, 0);
9041 
9042 	timeout_add_msec(&sc->sc_calib_to, 500);
9043 	iwm_led_enable(sc);
9044 
9045 	return 0;
9046 }
9047 
9048 int
9049 iwm_run_stop(struct iwm_softc *sc)
9050 {
9051 	struct ieee80211com *ic = &sc->sc_ic;
9052 	struct iwm_node *in = (void *)ic->ic_bss;
9053 	struct ieee80211_node *ni = &in->in_ni;
9054 	int err, i, tid;
9055 
9056 	splassert(IPL_NET);
9057 
9058 	/*
9059 	 * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
9060 	 * for this when moving out of RUN state since it runs in a
9061 	 * separate thread.
9062 	 * Note that in->in_ni (struct ieee80211_node) already represents
9063 	 * our new access point in case we are roaming between APs.
9064 	 * This means we cannot rely on struct ieee802111_node to tell
9065 	 * This means we cannot rely on struct ieee80211_node to tell
9066 	 */
9067 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9068 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
9069 		if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID)
9070 			continue;
9071 		err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
9072 		if (err)
9073 			return err;
9074 		iwm_clear_reorder_buffer(sc, rxba);
9075 		if (sc->sc_rx_ba_sessions > 0)
9076 			sc->sc_rx_ba_sessions--;
9077 	}
9078 	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
9079 		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
9080 		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
9081 			continue;
9082 		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
9083 		if (err)
9084 			return err;
9085 		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
9086 		if (err)
9087 			return err;
9088 		in->tfd_queue_msk &= ~(1 << qid);
9089 	}
9090 	ieee80211_ba_del(ni);
9091 
9092 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
9093 		iwm_led_blink_stop(sc);
9094 
9095 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
9096 	if (err)
9097 		return err;
9098 
9099 	iwm_disable_beacon_filter(sc);
9100 
9101 	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9102 		err = iwm_update_quotas(sc, in, 0);
9103 		if (err) {
9104 			printf("%s: could not update quotas (error %d)\n",
9105 			    DEVNAME(sc), err);
9106 			return err;
9107 		}
9108 	}
9109 
9110 	/* Mark station as disassociated. */
9111 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
9112 	if (err) {
9113 		printf("%s: failed to update MAC\n", DEVNAME(sc));
9114 		return err;
9115 	}
9116 
9117 	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
9118 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
9119 		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
9120 		    in->in_phyctxt->channel, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
9121 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9122 		if (err) {
9123 			printf("%s: failed to update PHY\n", DEVNAME(sc));
9124 			return err;
9125 		}
9126 	}
9127 
9128 	return 0;
9129 }
9130 
9131 struct ieee80211_node *
9132 iwm_node_alloc(struct ieee80211com *ic)
9133 {
9134 	return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
9135 }
9136 
9137 int
9138 iwm_set_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9139     struct ieee80211_key *k)
9140 {
9141 	struct iwm_softc *sc = ic->ic_softc;
9142 	struct iwm_add_sta_key_cmd_v1 cmd;
9143 
9144 	memset(&cmd, 0, sizeof(cmd));
9145 
9146 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9147 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9148 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9149 	    IWM_STA_KEY_FLG_KEYID_MSK));
9150 	if (k->k_flags & IEEE80211_KEY_GROUP)
9151 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9152 
9153 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9154 	cmd.common.key_offset = 0;
9155 	cmd.common.sta_id = IWM_STATION_ID;
9156 
9157 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9158 	    sizeof(cmd), &cmd);
9159 }
9160 
9161 int
9162 iwm_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9163     struct ieee80211_key *k)
9164 {
9165 	struct iwm_softc *sc = ic->ic_softc;
9166 	struct iwm_add_sta_key_cmd cmd;
9167 
9168 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9169 	    k->k_cipher != IEEE80211_CIPHER_CCMP)  {
9170 		/* Fallback to software crypto for other ciphers. */
9171 		return (ieee80211_set_key(ic, ni, k));
9172 	}
9173 
9174 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9175 		return iwm_set_key_v1(ic, ni, k);
9176 
9177 	memset(&cmd, 0, sizeof(cmd));
9178 
9179 	cmd.common.key_flags = htole16(IWM_STA_KEY_FLG_CCM |
9180 	    IWM_STA_KEY_FLG_WEP_KEY_MAP |
9181 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9182 	    IWM_STA_KEY_FLG_KEYID_MSK));
9183 	if (k->k_flags & IEEE80211_KEY_GROUP)
9184 		cmd.common.key_flags |= htole16(IWM_STA_KEY_MULTICAST);
9185 
9186 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9187 	cmd.common.key_offset = 0;
9188 	cmd.common.sta_id = IWM_STATION_ID;
9189 
9190 	cmd.transmit_seq_cnt = htole64(k->k_tsc);
9191 
9192 	return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9193 	    sizeof(cmd), &cmd);
9194 }
9195 
9196 void
9197 iwm_delete_key_v1(struct ieee80211com *ic, struct ieee80211_node *ni,
9198     struct ieee80211_key *k)
9199 {
9200 	struct iwm_softc *sc = ic->ic_softc;
9201 	struct iwm_add_sta_key_cmd_v1 cmd;
9202 
9203 	memset(&cmd, 0, sizeof(cmd));
9204 
9205 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9206 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9207 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9208 	    IWM_STA_KEY_FLG_KEYID_MSK));
9209 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9210 	cmd.common.key_offset = 0;
9211 	cmd.common.sta_id = IWM_STATION_ID;
9212 
9213 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9214 }
9215 
9216 void
9217 iwm_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
9218     struct ieee80211_key *k)
9219 {
9220 	struct iwm_softc *sc = ic->ic_softc;
9221 	struct iwm_add_sta_key_cmd cmd;
9222 
9223 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
9224 	    (k->k_cipher != IEEE80211_CIPHER_CCMP)) {
9225 		/* Fallback to software crypto for other ciphers. */
9226 		ieee80211_delete_key(ic, ni, k);
9227 		return;
9228 	}
9229 
9230 	if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
9231 		return;
9232 
9233 	if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9234 		return iwm_delete_key_v1(ic, ni, k);
9235 
9236 	memset(&cmd, 0, sizeof(cmd));
9237 
9238 	cmd.common.key_flags = htole16(IWM_STA_KEY_NOT_VALID |
9239 	    IWM_STA_KEY_FLG_NO_ENC | IWM_STA_KEY_FLG_WEP_KEY_MAP |
9240 	    ((k->k_id << IWM_STA_KEY_FLG_KEYID_POS) &
9241 	    IWM_STA_KEY_FLG_KEYID_MSK));
9242 	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
9243 	cmd.common.key_offset = 0;
9244 	cmd.common.sta_id = IWM_STATION_ID;
9245 
9246 	iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9247 }
9248 
9249 void
9250 iwm_calib_timeout(void *arg)
9251 {
9252 	struct iwm_softc *sc = arg;
9253 	struct ieee80211com *ic = &sc->sc_ic;
9254 	struct iwm_node *in = (void *)ic->ic_bss;
9255 	struct ieee80211_node *ni = &in->in_ni;
9256 	int s;
9257 
9258 	s = splnet();
9259 	if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
9260 	    (ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
9261 	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
9262 		int old_txrate = ni->ni_txrate;
9263 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
9264 		/*
9265 		 * If AMRR has chosen a new TX rate we must update
9266 		 * the firmware's LQ rate table.
9267 		 * ni_txrate may change again before the task runs so
9268 		 * cache the chosen rate in the iwm_node structure.
9269 		 */
9270 		if (ni->ni_txrate != old_txrate)
9271 			iwm_setrates(in, 1);
9272 	}
9273 
9274 	splx(s);
9275 
9276 	timeout_add_msec(&sc->sc_calib_to, 500);
9277 }
9278 
9279 void
9280 iwm_set_rate_table_vht(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9281 {
9282 	struct ieee80211_node *ni = &in->in_ni;
9283 	struct ieee80211com *ic = ni->ni_ic;
9284 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9285 	int ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9286 	int i, tab, txmcs;
9287 
9288 	/*
9289 	 * Fill the LQ rate selection table with VHT rates in descending
9290 	 * order, i.e. with the node's current TX rate first. Keep reducing
9291 	 * channel width during later Tx attempts, and eventually fall back
9292 	 * to legacy OFDM. Do not mix SISO and MIMO rates.
9293 	 */
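	/*
	 * For example, starting at MCS 9 on an 80 MHz channel, the
	 * first two entries may use 80 MHz, the next two fall back to
	 * 40 MHz, and later entries use 20 MHz at decreasing MCS.
	 */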
9294 	lqcmd->mimo_delim = 0;
9295 	txmcs = ni->ni_txmcs;
9296 	for (i = 0; i < nitems(lqcmd->rs_table); i++) {
9297 		if (txmcs >= 0) {
9298 			tab = IWM_RATE_MCS_VHT_MSK;
9299 			tab |= txmcs & IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9300 			tab |= ((ni->ni_vht_ss - 1) <<
9301 			    IWM_RATE_VHT_MCS_NSS_POS) &
9302 			    IWM_RATE_VHT_MCS_NSS_MSK;
9303 			if (ni->ni_vht_ss > 1)
9304 				tab |= IWM_RATE_MCS_ANT_AB_MSK;
9305 			else
9306 				tab |= iwm_valid_siso_ant_rate_mask(sc);
9307 
9308 			/*
9309 			 * First two Tx attempts may use 80MHz/40MHz/SGI.
9310 			 * Next two Tx attempts may use 40MHz/SGI.
9311 			 * Beyond that use 20 MHz and decrease the rate.
9312 			 * As a special case, MCS 9 is invalid on 20 MHz.
9313 			 */
9314 			if (txmcs == 9) {
9315 				if (i < 2 && in->in_phyctxt->vht_chan_width >=
9316 				    IEEE80211_VHTOP0_CHAN_WIDTH_80)
9317 					tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9318 				else if (in->in_phyctxt->sco ==
9319 				    IEEE80211_HTOP0_SCO_SCA ||
9320 				    in->in_phyctxt->sco ==
9321 				    IEEE80211_HTOP0_SCO_SCB)
9322 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9323 				else {
9324 					/* no 40 MHz, fall back on MCS 8 */
9325 					tab &= ~IWM_RATE_VHT_MCS_RATE_CODE_MSK;
9326 					tab |= 8;
9327 				}
9328 
9329 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9330 				if (i < 4) {
9331 					if (ieee80211_ra_vht_use_sgi(ni))
9332 						tab |= IWM_RATE_MCS_SGI_MSK;
9333 				} else
9334 					txmcs--;
9335 			} else if (i < 2 && in->in_phyctxt->vht_chan_width >=
9336 			    IEEE80211_VHTOP0_CHAN_WIDTH_80) {
9337 				tab |= IWM_RATE_MCS_CHAN_WIDTH_80;
9338 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9339 				if (ieee80211_ra_vht_use_sgi(ni))
9340 					tab |= IWM_RATE_MCS_SGI_MSK;
9341 			} else if (i < 4 &&
9342 			    in->in_phyctxt->vht_chan_width >=
9343 			    IEEE80211_VHTOP0_CHAN_WIDTH_HT &&
9344 			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
9345 			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
9346 				tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9347 				tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9348 				if (ieee80211_ra_vht_use_sgi(ni))
9349 					tab |= IWM_RATE_MCS_SGI_MSK;
9350 			} else if (txmcs >= 0)
9351 				txmcs--;
9352 		} else {
9353 			/* Fill the rest with the lowest possible rate. */
9354 			tab = iwm_rates[ridx_min].plcp;
9355 			tab |= iwm_valid_siso_ant_rate_mask(sc);
9356 			if (ni->ni_vht_ss > 1 && lqcmd->mimo_delim == 0)
9357 				lqcmd->mimo_delim = i;
9358 		}
9359 
9360 		lqcmd->rs_table[i] = htole32(tab);
9361 	}
9362 }
9363 
9364 void
9365 iwm_set_rate_table(struct iwm_node *in, struct iwm_lq_cmd *lqcmd)
9366 {
9367 	struct ieee80211_node *ni = &in->in_ni;
9368 	struct ieee80211com *ic = ni->ni_ic;
9369 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9370 	struct ieee80211_rateset *rs = &ni->ni_rates;
9371 	int i, ridx, ridx_min, ridx_max, j, mimo, tab = 0;
9372 
9373 	/*
9374 	 * Fill the LQ rate selection table with legacy and/or HT rates
9375 	 * in descending order, i.e. with the node's current TX rate first.
9376 	 * In cases where throughput of an HT rate corresponds to a legacy
9377 	 * rate it makes no sense to add both. We rely on the fact that
9378 	 * iwm_rates is laid out such that equivalent HT/legacy rates share
9379 	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
9380 	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
9381 	 */
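	/*
	 * e.g. a SISO node currently at HT MCS 5 gets entries for
	 * MCS 5..0 first; any remaining slots are then filled with the
	 * lowest basic rate by the loop at the end of this function.
	 */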
9382 	j = 0;
9383 	ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
9384 	mimo = iwm_is_mimo_ht_mcs(ni->ni_txmcs);
9385 	ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
9386 	for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
9387 		uint8_t plcp = iwm_rates[ridx].plcp;
9388 		uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
9389 
9390 		if (j >= nitems(lqcmd->rs_table))
9391 			break;
9392 		tab = 0;
9393 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9394 			if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
9395 				continue;
9396 			/* Do not mix SISO and MIMO HT rates. */
9397 			if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
9398 			    (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
9399 				continue;
9400 			for (i = ni->ni_txmcs; i >= 0; i--) {
9401 				if (isclr(ni->ni_rxmcs, i))
9402 					continue;
9403 				if (ridx != iwm_ht_mcs2ridx[i])
9404 					continue;
9405 				tab = ht_plcp;
9406 				tab |= IWM_RATE_MCS_HT_MSK;
9407 				/* First two Tx attempts may use 40MHz/SGI. */
9408 				if (j > 1)
9409 					break;
9410 				if (in->in_phyctxt->sco ==
9411 				    IEEE80211_HTOP0_SCO_SCA ||
9412 				    in->in_phyctxt->sco ==
9413 				    IEEE80211_HTOP0_SCO_SCB) {
9414 					tab |= IWM_RATE_MCS_CHAN_WIDTH_40;
9415 					tab |= IWM_RATE_MCS_RTS_REQUIRED_MSK;
9416 				}
9417 				if (ieee80211_ra_use_ht_sgi(ni))
9418 					tab |= IWM_RATE_MCS_SGI_MSK;
9419 				break;
9420 			}
9421 		} else if (plcp != IWM_RATE_INVM_PLCP) {
9422 			for (i = ni->ni_txrate; i >= 0; i--) {
9423 				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
9424 				    IEEE80211_RATE_VAL)) {
9425 					tab = plcp;
9426 					break;
9427 				}
9428 			}
9429 		}
9430 
9431 		if (tab == 0)
9432 			continue;
9433 
9434 		if (iwm_is_mimo_ht_plcp(ht_plcp))
9435 			tab |= IWM_RATE_MCS_ANT_AB_MSK;
9436 		else
9437 			tab |= iwm_valid_siso_ant_rate_mask(sc);
9438 
9439 		if (IWM_RIDX_IS_CCK(ridx))
9440 			tab |= IWM_RATE_MCS_CCK_MSK;
9441 		lqcmd->rs_table[j++] = htole32(tab);
9442 	}
9443 
9444 	lqcmd->mimo_delim = (mimo ? j : 0);
9445 
9446 	/* Fill the rest with the lowest possible rate */
9447 	while (j < nitems(lqcmd->rs_table)) {
9448 		tab = iwm_rates[ridx_min].plcp;
9449 		if (IWM_RIDX_IS_CCK(ridx_min))
9450 			tab |= IWM_RATE_MCS_CCK_MSK;
9451 		tab |= iwm_valid_siso_ant_rate_mask(sc);
9452 		lqcmd->rs_table[j++] = htole32(tab);
9453 	}
9454 }
9455 
9456 void
9457 iwm_setrates(struct iwm_node *in, int async)
9458 {
9459 	struct ieee80211_node *ni = &in->in_ni;
9460 	struct ieee80211com *ic = ni->ni_ic;
9461 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9462 	struct iwm_lq_cmd lqcmd;
9463 	struct iwm_host_cmd cmd = {
9464 		.id = IWM_LQ_CMD,
9465 		.len = { sizeof(lqcmd), },
9466 	};
9467 
9468 	cmd.flags = async ? IWM_CMD_ASYNC : 0;
9469 
9470 	memset(&lqcmd, 0, sizeof(lqcmd));
9471 	lqcmd.sta_id = IWM_STATION_ID;
9472 
9473 	if (ic->ic_flags & IEEE80211_F_USEPROT)
9474 		lqcmd.flags |= IWM_LQ_FLAG_USE_RTS_MSK;
9475 
9476 	if (ni->ni_flags & IEEE80211_NODE_VHT)
9477 		iwm_set_rate_table_vht(in, &lqcmd);
9478 	else
9479 		iwm_set_rate_table(in, &lqcmd);
9480 
9481 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
9482 	    (iwm_fw_valid_tx_ant(sc) & IWM_ANT_B))
9483 		lqcmd.single_stream_ant_msk = IWM_ANT_B;
9484 	else
9485 		lqcmd.single_stream_ant_msk = IWM_ANT_A;
9486 	lqcmd.dual_stream_ant_msk = IWM_ANT_AB;
9487 
9488 	lqcmd.agg_time_limit = htole16(4000);	/* 4ms */
9489 	lqcmd.agg_disable_start_th = 3;
9490 	lqcmd.agg_frame_cnt_limit = 0x3f;
9491 
9492 	cmd.data[0] = &lqcmd;
9493 	iwm_send_cmd(sc, &cmd);
9494 }
9495 
9496 int
9497 iwm_media_change(struct ifnet *ifp)
9498 {
9499 	struct iwm_softc *sc = ifp->if_softc;
9500 	struct ieee80211com *ic = &sc->sc_ic;
9501 	uint8_t rate, ridx;
9502 	int err;
9503 
9504 	err = ieee80211_media_change(ifp);
9505 	if (err != ENETRESET)
9506 		return err;
9507 
9508 	if (ic->ic_fixed_mcs != -1)
9509 		sc->sc_fixed_ridx = iwm_ht_mcs2ridx[ic->ic_fixed_mcs];
9510 	else if (ic->ic_fixed_rate != -1) {
9511 		rate = ic->ic_sup_rates[ic->ic_curmode].
9512 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
9513 		/* Map 802.11 rate to HW rate index. */
9514 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
9515 			if (iwm_rates[ridx].rate == rate)
9516 				break;
9517 		sc->sc_fixed_ridx = ridx;
9518 	}
9519 
9520 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9521 	    (IFF_UP | IFF_RUNNING)) {
9522 		iwm_stop(ifp);
9523 		err = iwm_init(ifp);
9524 	}
9525 	return err;
9526 }
9527 
9528 void
9529 iwm_newstate_task(void *psc)
9530 {
9531 	struct iwm_softc *sc = (struct iwm_softc *)psc;
9532 	struct ieee80211com *ic = &sc->sc_ic;
9533 	enum ieee80211_state nstate = sc->ns_nstate;
9534 	enum ieee80211_state ostate = ic->ic_state;
9535 	int arg = sc->ns_arg;
9536 	int err = 0, s = splnet();
9537 
9538 	if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9539 		/* iwm_stop() is waiting for us. */
9540 		refcnt_rele_wake(&sc->task_refs);
9541 		splx(s);
9542 		return;
9543 	}
9544 
9545 	if (ostate == IEEE80211_S_SCAN) {
9546 		if (nstate == ostate) {
9547 			if (sc->sc_flags & IWM_FLAG_SCANNING) {
9548 				refcnt_rele_wake(&sc->task_refs);
9549 				splx(s);
9550 				return;
9551 			}
9552 			/* Firmware is no longer scanning. Do another scan. */
9553 			goto next_scan;
9554 		} else
9555 			iwm_led_blink_stop(sc);
9556 	}
9557 
9558 	if (nstate <= ostate) {
9559 		switch (ostate) {
9560 		case IEEE80211_S_RUN:
9561 			err = iwm_run_stop(sc);
9562 			if (err)
9563 				goto out;
9564 			/* FALLTHROUGH */
9565 		case IEEE80211_S_ASSOC:
9566 		case IEEE80211_S_AUTH:
9567 			if (nstate <= IEEE80211_S_AUTH) {
9568 				err = iwm_deauth(sc);
9569 				if (err)
9570 					goto out;
9571 			}
9572 			/* FALLTHROUGH */
9573 		case IEEE80211_S_SCAN:
9574 		case IEEE80211_S_INIT:
9575 			break;
9576 		}
9577 
9578 		/* Die now if iwm_stop() was called while we were sleeping. */
9579 		if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9580 			refcnt_rele_wake(&sc->task_refs);
9581 			splx(s);
9582 			return;
9583 		}
9584 	}
9585 
9586 	switch (nstate) {
9587 	case IEEE80211_S_INIT:
9588 		break;
9589 
9590 	case IEEE80211_S_SCAN:
9591 next_scan:
9592 		err = iwm_scan(sc);
9593 		if (err)
9594 			break;
9595 		refcnt_rele_wake(&sc->task_refs);
9596 		splx(s);
9597 		return;
9598 
9599 	case IEEE80211_S_AUTH:
9600 		err = iwm_auth(sc);
9601 		break;
9602 
9603 	case IEEE80211_S_ASSOC:
9604 		break;
9605 
9606 	case IEEE80211_S_RUN:
9607 		err = iwm_run(sc);
9608 		break;
9609 	}
9610 
9611 out:
9612 	if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9613 		if (err)
9614 			task_add(systq, &sc->init_task);
9615 		else
9616 			sc->sc_newstate(ic, nstate, arg);
9617 	}
9618 	refcnt_rele_wake(&sc->task_refs);
9619 	splx(s);
9620 }
9621 
9622 int
9623 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
9624 {
9625 	struct ifnet *ifp = IC2IFP(ic);
9626 	struct iwm_softc *sc = ifp->if_softc;
9627 
9628 	/*
9629 	 * Prevent attempts to transition towards the same state, unless
9630 	 * we are scanning in which case a SCAN -> SCAN transition
9631 	 * triggers another scan iteration. And AUTH -> AUTH is needed
9632 	 * to support band-steering.
9633 	 */
9634 	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9635 	    nstate != IEEE80211_S_AUTH)
9636 		return 0;
9637 
9638 	if (ic->ic_state == IEEE80211_S_RUN) {
9639 		timeout_del(&sc->sc_calib_to);
9640 		iwm_del_task(sc, systq, &sc->ba_task);
9641 		iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9642 		iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9643 		iwm_del_task(sc, systq, &sc->bgscan_done_task);
9644 	}
9645 
9646 	sc->ns_nstate = nstate;
9647 	sc->ns_arg = arg;
9648 
9649 	iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9650 
9651 	return 0;
9652 }
9653 
9654 void
9655 iwm_endscan(struct iwm_softc *sc)
9656 {
9657 	struct ieee80211com *ic = &sc->sc_ic;
9658 
9659 	if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
9660 		return;
9661 
9662 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9663 	ieee80211_end_scan(&ic->ic_if);
9664 }
9665 
9666 /*
9667  * Aging and idle timeouts for the different possible scenarios
9668  * in default configuration
9669  */
9670 static const uint32_t
9671 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9672 	{
9673 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
9674 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
9675 	},
9676 	{
9677 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
9678 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
9679 	},
9680 	{
9681 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
9682 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
9683 	},
9684 	{
9685 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
9686 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
9687 	},
9688 	{
9689 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
9690 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
9691 	},
9692 };
9693 
9694 /*
9695  * Aging and idle timeouts for the different possible scenarios
9696  * in single BSS MAC configuration.
9697  */
9698 static const uint32_t
9699 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
9700 	{
9701 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
9702 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
9703 	},
9704 	{
9705 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
9706 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
9707 	},
9708 	{
9709 		htole32(IWM_SF_MCAST_AGING_TIMER),
9710 		htole32(IWM_SF_MCAST_IDLE_TIMER)
9711 	},
9712 	{
9713 		htole32(IWM_SF_BA_AGING_TIMER),
9714 		htole32(IWM_SF_BA_IDLE_TIMER)
9715 	},
9716 	{
9717 		htole32(IWM_SF_TX_RE_AGING_TIMER),
9718 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
9719 	},
9720 };
9721 
9722 void
9723 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9724     struct ieee80211_node *ni)
9725 {
9726 	int i, j, watermark;
9727 
9728 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
9729 
9730 	/*
9731 	 * If we are in association flow - check antenna configuration
9732 	 * capabilities of the AP station, and choose the watermark accordingly.
9733 	 */
9734 	if (ni) {
9735 		if (ni->ni_flags & IEEE80211_NODE_HT) {
9736 			if (ni->ni_rxmcs[1] != 0)
9737 				watermark = IWM_SF_W_MARK_MIMO2;
9738 			else
9739 				watermark = IWM_SF_W_MARK_SISO;
9740 		} else {
9741 			watermark = IWM_SF_W_MARK_LEGACY;
9742 		}
9743 	/* default watermark value for unassociated mode. */
9744 	} else {
9745 		watermark = IWM_SF_W_MARK_MIMO2;
9746 	}
9747 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
9748 
9749 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
9750 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
9751 			sf_cmd->long_delay_timeouts[i][j] =
9752 					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
9753 		}
9754 	}
9755 
9756 	if (ni) {
9757 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
9758 		       sizeof(iwm_sf_full_timeout));
9759 	} else {
9760 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
9761 		       sizeof(iwm_sf_full_timeout_def));
9762 	}
9763 
9764 }
9765 
9766 int
9767 iwm_sf_config(struct iwm_softc *sc, int new_state)
9768 {
9769 	struct ieee80211com *ic = &sc->sc_ic;
9770 	struct iwm_sf_cfg_cmd sf_cmd = {
9771 		.state = htole32(new_state),
9772 	};
9773 	int err = 0;
9774 
9775 #if 0	/* only used for models with sdio interface, in iwlwifi */
9776 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
9777 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
9778 #endif
9779 
9780 	switch (new_state) {
9781 	case IWM_SF_UNINIT:
9782 	case IWM_SF_INIT_OFF:
9783 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
9784 		break;
9785 	case IWM_SF_FULL_ON:
9786 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9787 		break;
9788 	default:
9789 		return EINVAL;
9790 	}
9791 
9792 	err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
9793 				   sizeof(sf_cmd), &sf_cmd);
9794 	return err;
9795 }
9796 
9797 int
9798 iwm_send_bt_init_conf(struct iwm_softc *sc)
9799 {
9800 	struct iwm_bt_coex_cmd bt_cmd;
9801 
9802 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
9803 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
9804 
9805 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
9806 	    &bt_cmd);
9807 }
9808 
9809 int
9810 iwm_send_soc_conf(struct iwm_softc *sc)
9811 {
9812 	struct iwm_soc_configuration_cmd cmd;
9813 	int err;
9814 	uint32_t cmd_id, flags = 0;
9815 
9816 	memset(&cmd, 0, sizeof(cmd));
9817 
9818 	/*
9819 	 * In VER_1 of this command, the discrete value is considered
9820 	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
9821 	 * values in VER_1, this is backwards-compatible with VER_2,
9822 	 * as long as we don't set any other flag bits.
9823 	 */
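	/*
	 * e.g. a discrete (non-integrated) SoC sets only bit 0
	 * (IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE), which VER_1 firmware
	 * interprets as the integer value 1.
	 */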
9824 	if (!sc->sc_integrated) { /* VER_1 */
9825 		flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
9826 	} else { /* VER_2 */
9827 		uint8_t scan_cmd_ver;
9828 		if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9829 			flags |= (sc->sc_ltr_delay &
9830 			    IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
9831 		scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
9832 		    IWM_SCAN_REQ_UMAC);
9833 		if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
9834 		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9835 			flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
9836 	}
9837 	cmd.flags = htole32(flags);
9838 
9839 	cmd.latency = htole32(sc->sc_xtal_latency);
9840 
9841 	cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
9842 	err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9843 	if (err)
9844 		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9845 	return err;
9846 }
9847 
9848 int
9849 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9850 {
9851 	struct iwm_mcc_update_cmd mcc_cmd;
9852 	struct iwm_host_cmd hcmd = {
9853 		.id = IWM_MCC_UPDATE_CMD,
9854 		.flags = IWM_CMD_WANT_RESP,
9855 		.resp_pkt_len = IWM_CMD_RESP_MAX,
9856 		.data = { &mcc_cmd },
9857 	};
9858 	struct iwm_rx_packet *pkt;
9859 	size_t resp_len;
9860 	int err;
9861 	int resp_v3 = isset(sc->sc_enabled_capa,
9862 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3);
9863 
9864 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
9865 	    !sc->sc_nvm.lar_enabled) {
9866 		return 0;
9867 	}
9868 
9869 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
9870 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
9871 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9872 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9873 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
9874 	else
9875 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
9876 
9877 	if (resp_v3) { /* same size as resp_v2 */
9878 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
9879 	} else {
9880 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
9881 	}
9882 
9883 	err = iwm_send_cmd(sc, &hcmd);
9884 	if (err)
9885 		return err;
9886 
9887 	pkt = hcmd.resp_pkt;
9888 	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
9889 		err = EIO;
9890 		goto out;
9891 	}
9892 
9893 	if (resp_v3) {
9894 		struct iwm_mcc_update_resp_v3 *resp;
9895 		resp_len = iwm_rx_packet_payload_len(pkt);
9896 		if (resp_len < sizeof(*resp)) {
9897 			err = EIO;
9898 			goto out;
9899 		}
9900 
9901 		resp = (void *)pkt->data;
9902 		if (resp_len != sizeof(*resp) +
9903 		    resp->n_channels * sizeof(resp->channels[0])) {
9904 			err = EIO;
9905 			goto out;
9906 		}
9907 	} else {
9908 		struct iwm_mcc_update_resp_v1 *resp_v1;
9909 		resp_len = iwm_rx_packet_payload_len(pkt);
9910 		if (resp_len < sizeof(*resp_v1)) {
9911 			err = EIO;
9912 			goto out;
9913 		}
9914 
9915 		resp_v1 = (void *)pkt->data;
9916 		if (resp_len != sizeof(*resp_v1) +
9917 		    resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
9918 			err = EIO;
9919 			goto out;
9920 		}
9921 	}
9922 out:
9923 	iwm_free_resp(sc, &hcmd);
9924 	return err;
9925 }
9926 
9927 int
9928 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9929 {
9930 	struct iwm_temp_report_ths_cmd cmd;
9931 	int err;
9932 
9933 	/*
9934 	 * In order to give responsibility for critical-temperature-kill
9935 	 * and TX backoff to the FW, we need to send an empty temperature
9936 	 * reporting command at init time.
9937 	 */
9938 	memset(&cmd, 0, sizeof(cmd));
9939 
9940 	err = iwm_send_cmd_pdu(sc,
9941 	    IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
9942 	    0, sizeof(cmd), &cmd);
9943 	if (err)
9944 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
9945 		    DEVNAME(sc), err);
9946 
9947 	return err;
9948 }
9949 
9950 void
9951 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9952 {
9953 	struct iwm_host_cmd cmd = {
9954 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
9955 		.len = { sizeof(uint32_t), },
9956 		.data = { &backoff, },
9957 	};
9958 
9959 	iwm_send_cmd(sc, &cmd);
9960 }
9961 
9962 void
9963 iwm_free_fw_paging(struct iwm_softc *sc)
9964 {
9965 	int i;
9966 
9967 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
9968 		return;
9969 
9970 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
9971 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9972 	}
9973 
9974 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
9975 }
9976 
9977 int
9978 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
9979 {
9980 	int sec_idx, idx;
9981 	uint32_t offset = 0;
9982 
9983 	/*
9984 	 * Find where the paging image starts. If a CPU2 image exists and
9985 	 * is in paging format, the firmware image is laid out as follows:
9986 	 * CPU1 sections (2 or more)
9987 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter between CPU1 and CPU2
9988 	 * CPU2 sections (not paged)
9989 	 * PAGING_SEPARATOR_SECTION delimiter between the non-paged CPU2
9990 	 * sections and the CPU2 paging sections
9991 	 * CPU2 paging CSS
9992 	 * CPU2 paging image (instructions and data)
9993 	 */
9994 	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
9995 		if (image->fw_sect[sec_idx].fws_devoff ==
9996 		    IWM_PAGING_SEPARATOR_SECTION) {
9997 			sec_idx++;
9998 			break;
9999 		}
10000 	}
10001 
10002 	/*
10003 	 * If paging is enabled there should be at least 2 more sections left
10004 	 * (one for CSS and one for Paging data)
10005 	 */
10006 	if (sec_idx >= nitems(image->fw_sect) - 1) {
10007 		printf("%s: Paging: Missing CSS and/or paging sections\n",
10008 		    DEVNAME(sc));
10009 		iwm_free_fw_paging(sc);
10010 		return EINVAL;
10011 	}
10012 
10013 	/* copy the CSS block to DRAM */
10014 	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
10015 	    DEVNAME(sc), sec_idx));
10016 
10017 	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
10018 	    image->fw_sect[sec_idx].fws_data,
10019 	    sc->fw_paging_db[0].fw_paging_size);
10020 
10021 	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
10022 	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
10023 
10024 	sec_idx++;
10025 
10026 	/*
10027 	 * Copy the paging blocks to DRAM. The loop index starts at 1
10028 	 * because the CSS block was already copied above and occupies
10029 	 * index 0. It stops before the last block, which may not be
10030 	 * full and is copied separately below.
10031 	 */
10032 	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
10033 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10034 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
10035 		    sc->fw_paging_db[idx].fw_paging_size);
10036 
10037 		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
10038 		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
10039 
10040 		offset += sc->fw_paging_db[idx].fw_paging_size;
10041 	}
10042 
10043 	/* copy the last paging block */
10044 	if (sc->num_of_pages_in_last_blk > 0) {
10045 		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10046 		    (const char *)image->fw_sect[sec_idx].fws_data + offset,
10047 		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
10048 
10049 		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
10050 		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
10051 	}
10052 
10053 	return 0;
10054 }
10055 
10056 int
10057 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
10058 {
10059 	int blk_idx = 0;
10060 	int error, num_of_pages;
10061 
10062 	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
10063 		int i;
10064 		/* Device got reset, and we set up firmware paging again. */
10065 		bus_dmamap_sync(sc->sc_dmat,
10066 		    sc->fw_paging_db[0].fw_paging_block.map,
10067 		    0, IWM_FW_PAGING_SIZE,
10068 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10069 		for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
10070 			bus_dmamap_sync(sc->sc_dmat,
10071 			    sc->fw_paging_db[i].fw_paging_block.map,
10072 			    0, IWM_PAGING_BLOCK_SIZE,
10073 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
10074 		}
10075 		return 0;
10076 	}
10077 
10078 	/* ensure that (1 << IWM_BLOCK_2_EXP_SIZE) equals IWM_PAGING_BLOCK_SIZE */
10079 #if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
10080 #error (1 << IWM_BLOCK_2_EXP_SIZE) must equal IWM_PAGING_BLOCK_SIZE
10081 #endif
10082 
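	/*
	 * The paging image consists of 4KB pages grouped into 32KB
	 * blocks of 8 pages each; round the block count up so that a
	 * partially filled last block is still allocated.
	 */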
10083 	num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
10084 	sc->num_of_paging_blk =
10085 	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
10086 
10087 	sc->num_of_pages_in_last_blk =
10088 		num_of_pages -
10089 		IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
10090 
10091 	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
10092 	    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
10093 	    sc->num_of_paging_blk,
10094 	    sc->num_of_pages_in_last_blk));
10095 
10096 	/* allocate block of 4Kbytes for paging CSS */
10097 	error = iwm_dma_contig_alloc(sc->sc_dmat,
10098 	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
10099 	    4096);
10100 	if (error) {
10101 		/* free all the previous pages since we failed */
10102 		iwm_free_fw_paging(sc);
10103 		return ENOMEM;
10104 	}
10105 
10106 	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
10107 
10108 	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
10109 	    DEVNAME(sc)));
10110 
10111 	/*
10112 	 * Allocate the remaining blocks in DRAM. Since the CSS occupies
10113 	 * fw_paging_db[0], the loop starts at index 1.
10114 	 */
10115 	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10116 		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
10118 		error = iwm_dma_contig_alloc(sc->sc_dmat,
10119 		     &sc->fw_paging_db[blk_idx].fw_paging_block,
10120 		    IWM_PAGING_BLOCK_SIZE, 4096);
10121 		if (error) {
10122 			/* free all the previous pages since we failed */
10123 			iwm_free_fw_paging(sc);
10124 			return ENOMEM;
10125 		}
10126 
10127 		sc->fw_paging_db[blk_idx].fw_paging_size =
10128 		    IWM_PAGING_BLOCK_SIZE;
10129 
10130 		DPRINTF((
10131 		    "%s: Paging: allocated 32K bytes for firmware paging.\n",
10132 		    DEVNAME(sc)));
10133 	}
10134 
10135 	return 0;
10136 }
10137 
10138 int
10139 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10140 {
10141 	int ret;
10142 
10143 	ret = iwm_alloc_fw_paging_mem(sc, fw);
10144 	if (ret)
10145 		return ret;
10146 
10147 	return iwm_fill_paging_mem(sc, fw);
10148 }
10149 
10150 /* Send the paging command to the firmware when CPU2 has a paging image. */
10151 int
10152 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10153 {
10154 	int blk_idx;
10155 	uint32_t dev_phy_addr;
10156 	struct iwm_fw_paging_cmd fw_paging_cmd = {
10157 		.flags =
10158 			htole32(IWM_PAGING_CMD_IS_SECURED |
10159 				IWM_PAGING_CMD_IS_ENABLED |
10160 				(sc->num_of_pages_in_last_blk <<
10161 				IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
10162 		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
10163 		.block_num = htole32(sc->num_of_paging_blk),
10164 	};
10165 
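	/*
	 * The firmware addresses each block by its 4KB page frame
	 * number, which is why the physical addresses below are
	 * shifted right by IWM_PAGE_2_EXP_SIZE.
	 */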
10166 	/* loop for all paging blocks + CSS block */
10167 	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10168 		dev_phy_addr = htole32(
10169 		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
10170 		    IWM_PAGE_2_EXP_SIZE);
10171 		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
10172 		bus_dmamap_sync(sc->sc_dmat,
10173 		    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
10174 		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
10175 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10176 	}
10177 
10178 	return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
10179 					       IWM_LONG_GROUP, 0),
10180 	    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
10181 }
10182 
10183 int
10184 iwm_init_hw(struct iwm_softc *sc)
10185 {
10186 	struct ieee80211com *ic = &sc->sc_ic;
10187 	int err, i, ac, qid, s;
10188 
10189 	err = iwm_run_init_mvm_ucode(sc, 0);
10190 	if (err)
10191 		return err;
10192 
10193 	/* Should stop and start HW since INIT image just loaded. */
10194 	iwm_stop_device(sc);
10195 	err = iwm_start_hw(sc);
10196 	if (err) {
10197 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10198 		return err;
10199 	}
10200 
10201 	/* Restart, this time with the regular firmware */
10202 	s = splnet();
10203 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
10204 	if (err) {
10205 		printf("%s: could not load firmware\n", DEVNAME(sc));
10206 		splx(s);
10207 		return err;
10208 	}
10209 
10210 	if (!iwm_nic_lock(sc)) {
10211 		splx(s);
10212 		return EBUSY;
10213 	}
10214 
10215 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
10216 	if (err) {
10217 		printf("%s: could not init tx ant config (error %d)\n",
10218 		    DEVNAME(sc), err);
10219 		goto err;
10220 	}
10221 
10222 	err = iwm_send_phy_db_data(sc);
10223 	if (err) {
10224 		printf("%s: could not init phy db (error %d)\n",
10225 		    DEVNAME(sc), err);
10226 		goto err;
10227 	}
10228 
10229 	err = iwm_send_phy_cfg_cmd(sc);
10230 	if (err) {
10231 		printf("%s: could not send phy config (error %d)\n",
10232 		    DEVNAME(sc), err);
10233 		goto err;
10234 	}
10235 
10236 	err = iwm_send_bt_init_conf(sc);
10237 	if (err) {
10238 		printf("%s: could not init bt coex (error %d)\n",
10239 		    DEVNAME(sc), err);
10240 		goto err;
10241 	}
10242 
10243 	if (isset(sc->sc_enabled_capa,
10244 	    IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
10245 		err = iwm_send_soc_conf(sc);
10246 		if (err)
10247 			goto err;
10248 	}
10249 
10250 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
10251 		err = iwm_send_dqa_cmd(sc);
10252 		if (err)
10253 			goto err;
10254 	}
10255 
10256 	/* Add auxiliary station for scanning */
10257 	err = iwm_add_aux_sta(sc);
10258 	if (err) {
10259 		printf("%s: could not add aux station (error %d)\n",
10260 		    DEVNAME(sc), err);
10261 		goto err;
10262 	}
10263 
10264 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
10265 		/*
10266 		 * The channel used here isn't relevant as it's
10267 		 * going to be overwritten in the other flows.
10268 		 * For now use the first channel we have.
10269 		 */
10270 		sc->sc_phyctxt[i].id = i;
10271 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
10272 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
10273 		    IWM_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
10274 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
10275 		if (err) {
10276 			printf("%s: could not add phy context %d (error %d)\n",
10277 			    DEVNAME(sc), i, err);
10278 			goto err;
10279 		}
10280 	}
10281 
10282 	/* Initialize tx backoffs to the minimum. */
10283 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
10284 		iwm_tt_tx_backoff(sc, 0);
10285 
10286 
10287 	err = iwm_config_ltr(sc);
10288 	if (err) {
10289 		printf("%s: PCIe LTR configuration failed (error %d)\n",
10290 		    DEVNAME(sc), err);
10291 	}
10292 
10293 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
10294 		err = iwm_send_temp_report_ths_cmd(sc);
10295 		if (err)
10296 			goto err;
10297 	}
10298 
10299 	err = iwm_power_update_device(sc);
10300 	if (err) {
10301 		printf("%s: could not send power command (error %d)\n",
10302 		    DEVNAME(sc), err);
10303 		goto err;
10304 	}
10305 
10306 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
10307 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
10308 		if (err) {
10309 			printf("%s: could not init LAR (error %d)\n",
10310 			    DEVNAME(sc), err);
10311 			goto err;
10312 		}
10313 	}
10314 
10315 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
10316 		err = iwm_config_umac_scan(sc);
10317 		if (err) {
10318 			printf("%s: could not configure scan (error %d)\n",
10319 			    DEVNAME(sc), err);
10320 			goto err;
10321 		}
10322 	}
10323 
10324 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10325 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10326 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
10327 		else
10328 			qid = IWM_AUX_QUEUE;
10329 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
10330 		    iwm_ac_to_tx_fifo[EDCA_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
10331 		if (err) {
10332 			printf("%s: could not enable monitor inject Tx queue "
10333 			    "(error %d)\n", DEVNAME(sc), err);
10334 			goto err;
10335 		}
10336 	} else {
10337 		for (ac = 0; ac < EDCA_NUM_AC; ac++) {
10338 			if (isset(sc->sc_enabled_capa,
10339 			    IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10340 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
10341 			else
10342 				qid = ac;
10343 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
10344 			    iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
10345 			if (err) {
10346 				printf("%s: could not enable Tx queue %d "
10347 				    "(error %d)\n", DEVNAME(sc), ac, err);
10348 				goto err;
10349 			}
10350 		}
10351 	}
10352 
10353 	err = iwm_disable_beacon_filter(sc);
10354 	if (err) {
10355 		printf("%s: could not disable beacon filter (error %d)\n",
10356 		    DEVNAME(sc), err);
10357 		goto err;
10358 	}
10359 
10360 err:
10361 	iwm_nic_unlock(sc);
10362 	splx(s);
10363 	return err;
10364 }
10365 
10366 /* Allow multicast from our BSSID. */
10367 int
10368 iwm_allow_mcast(struct iwm_softc *sc)
10369 {
10370 	struct ieee80211com *ic = &sc->sc_ic;
10371 	struct iwm_node *in = (void *)ic->ic_bss;
10372 	struct iwm_mcast_filter_cmd *cmd;
10373 	size_t size;
10374 	int err;
10375 
10376 	size = roundup(sizeof(*cmd), 4);
10377 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
10378 	if (cmd == NULL)
10379 		return ENOMEM;
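	/*
	 * With pass_all set the firmware accepts all multicast frames,
	 * so no explicit address list is needed (count stays 0).
	 */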
10380 	cmd->filter_own = 1;
10381 	cmd->port_id = 0;
10382 	cmd->count = 0;
10383 	cmd->pass_all = 1;
10384 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
10385 
10386 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
10387 	    0, size, cmd);
10388 	free(cmd, M_DEVBUF, size);
10389 	return err;
10390 }
10391 
10392 int
10393 iwm_init(struct ifnet *ifp)
10394 {
10395 	struct iwm_softc *sc = ifp->if_softc;
10396 	struct ieee80211com *ic = &sc->sc_ic;
10397 	int err, generation;
10398 
10399 	rw_assert_wrlock(&sc->ioctl_rwl);
10400 
10401 	generation = ++sc->sc_generation;
10402 
10403 	err = iwm_preinit(sc);
10404 	if (err)
10405 		return err;
10406 
10407 	err = iwm_start_hw(sc);
10408 	if (err) {
10409 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10410 		return err;
10411 	}
10412 
10413 	err = iwm_init_hw(sc);
10414 	if (err) {
10415 		if (generation == sc->sc_generation)
10416 			iwm_stop_device(sc);
10417 		return err;
10418 	}
10419 
10420 	if (sc->sc_nvm.sku_cap_11n_enable)
10421 		iwm_setup_ht_rates(sc);
10422 	if (sc->sc_nvm.sku_cap_11ac_enable)
10423 		iwm_setup_vht_rates(sc);
10424 
10425 	KASSERT(sc->task_refs.r_refs == 0);
10426 	refcnt_init(&sc->task_refs);
10427 	ifq_clr_oactive(&ifp->if_snd);
10428 	ifp->if_flags |= IFF_RUNNING;
10429 
10430 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
10431 		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
10432 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
10433 		return 0;
10434 	}
10435 
10436 	ieee80211_begin_scan(ifp);
10437 
10438 	/*
10439 	 * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
10440 	 * Wait until the transition to SCAN state has completed.
10441 	 */
10442 	do {
10443 		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwminit",
10444 		    SEC_TO_NSEC(1));
10445 		if (generation != sc->sc_generation)
10446 			return ENXIO;
10447 		if (err) {
10448 			iwm_stop(ifp);
10449 			return err;
10450 		}
10451 	} while (ic->ic_state != IEEE80211_S_SCAN);
10452 
10453 	return 0;
10454 }
10455 
10456 void
10457 iwm_start(struct ifnet *ifp)
10458 {
10459 	struct iwm_softc *sc = ifp->if_softc;
10460 	struct ieee80211com *ic = &sc->sc_ic;
10461 	struct ieee80211_node *ni;
10462 	struct ether_header *eh;
10463 	struct mbuf *m;
10464 	int ac = EDCA_AC_BE; /* XXX */
10465 
10466 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
10467 		return;
10468 
10469 	for (;;) {
10470 		/* why isn't this done per-queue? */
10471 		if (sc->qfullmsk != 0) {
10472 			ifq_set_oactive(&ifp->if_snd);
10473 			break;
10474 		}
10475 
10476 		/* Don't queue additional frames while flushing Tx queues. */
10477 		if (sc->sc_flags & IWM_FLAG_TXFLUSH)
10478 			break;
10479 
10480 		/* need to send management frames even if we're not RUNning */
10481 		m = mq_dequeue(&ic->ic_mgtq);
10482 		if (m) {
10483 			ni = m->m_pkthdr.ph_cookie;
10484 			goto sendit;
10485 		}
10486 
10487 		if (ic->ic_state != IEEE80211_S_RUN ||
10488 		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
10489 			break;
10490 
10491 		m = ifq_dequeue(&ifp->if_snd);
10492 		if (!m)
10493 			break;
10494 		if (m->m_len < sizeof (*eh) &&
10495 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
10496 			ifp->if_oerrors++;
10497 			continue;
10498 		}
10499 #if NBPFILTER > 0
10500 		if (ifp->if_bpf != NULL)
10501 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
10502 #endif
10503 		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
10504 			ifp->if_oerrors++;
10505 			continue;
10506 		}
10507 
10508  sendit:
10509 #if NBPFILTER > 0
10510 		if (ic->ic_rawbpf != NULL)
10511 			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
10512 #endif
10513 		if (iwm_tx(sc, m, ni, ac) != 0) {
10514 			ieee80211_release_node(ic, ni);
10515 			ifp->if_oerrors++;
10516 			continue;
10517 		}
10518 
10519 		if (ifp->if_flags & IFF_UP)
10520 			ifp->if_timer = 1;
10521 	}
10522 
10523 	return;
10524 }
10525 
10526 void
10527 iwm_stop(struct ifnet *ifp)
10528 {
10529 	struct iwm_softc *sc = ifp->if_softc;
10530 	struct ieee80211com *ic = &sc->sc_ic;
10531 	struct iwm_node *in = (void *)ic->ic_bss;
10532 	int i, s = splnet();
10533 
10534 	rw_assert_wrlock(&sc->ioctl_rwl);
10535 
10536 	sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10537 
10538 	/* Cancel scheduled tasks and let any stale tasks finish up. */
10539 	task_del(systq, &sc->init_task);
10540 	iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10541 	iwm_del_task(sc, systq, &sc->ba_task);
10542 	iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10543 	iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10544 	iwm_del_task(sc, systq, &sc->bgscan_done_task);
10545 	KASSERT(sc->task_refs.r_refs >= 1);
10546 	refcnt_finalize(&sc->task_refs, "iwmstop");
10547 
10548 	iwm_stop_device(sc);
10549 
10550 	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
10551 	sc->bgscan_unref_arg = NULL;
10552 	sc->bgscan_unref_arg_size = 0;
10553 
10554 	/* Reset soft state. */
10555 
10556 	sc->sc_generation++;
10557 	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10558 		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10559 		sc->sc_cmd_resp_pkt[i] = NULL;
10560 		sc->sc_cmd_resp_len[i] = 0;
10561 	}
10562 	ifp->if_flags &= ~IFF_RUNNING;
10563 	ifq_clr_oactive(&ifp->if_snd);
10564 
10565 	in->in_phyctxt = NULL;
10566 	in->tid_disable_ampdu = 0xffff;
10567 	in->tfd_queue_msk = 0;
10568 	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
10569 
10570 	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10571 	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10572 	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10573 	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10574 	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10575 	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10576 	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10577 	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10578 
10579 	sc->sc_rx_ba_sessions = 0;
10580 	sc->ba_rx.start_tidmask = 0;
10581 	sc->ba_rx.stop_tidmask = 0;
10582 	sc->tx_ba_queue_mask = 0;
10583 	sc->ba_tx.start_tidmask = 0;
10584 	sc->ba_tx.stop_tidmask = 0;
10585 
10586 	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10587 	sc->ns_nstate = IEEE80211_S_INIT;
10588 
10589 	timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10590 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10591 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10592 		iwm_clear_reorder_buffer(sc, rxba);
10593 	}
10594 	iwm_led_blink_stop(sc);
10595 	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
10596 	ifp->if_timer = 0;
10597 
10598 	splx(s);
10599 }
10600 
10601 void
10602 iwm_watchdog(struct ifnet *ifp)
10603 {
10604 	struct iwm_softc *sc = ifp->if_softc;
10605 	int i;
10606 
10607 	ifp->if_timer = 0;
10608 
10609 	/*
10610 	 * We maintain a separate timer for each Tx queue because
10611 	 * Tx aggregation queues can get "stuck" while other queues
10612 	 * keep working. The Linux driver uses a similar workaround.
10613 	 */
10614 	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
10615 		if (sc->sc_tx_timer[i] > 0) {
10616 			if (--sc->sc_tx_timer[i] == 0) {
10617 				printf("%s: device timeout\n", DEVNAME(sc));
10618 				if (ifp->if_flags & IFF_DEBUG) {
10619 					iwm_nic_error(sc);
10620 					iwm_dump_driver_status(sc);
10621 				}
10622 				if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10623 					task_add(systq, &sc->init_task);
10624 				ifp->if_oerrors++;
10625 				return;
10626 			}
10627 			ifp->if_timer = 1;
10628 		}
10629 	}
10630 
10631 	ieee80211_watchdog(ifp);
10632 }
10633 
10634 int
10635 iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
10636 {
10637 	struct iwm_softc *sc = ifp->if_softc;
10638 	int s, err = 0, generation = sc->sc_generation;
10639 
10640 	/*
10641 	 * Prevent processes from entering this function while another
10642 	 * process is tsleep'ing in it.
10643 	 */
10644 	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
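	/*
	 * If the device was reset while we slept waiting for the lock,
	 * the state our caller saw is stale; report ENXIO.
	 */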
10645 	if (err == 0 && generation != sc->sc_generation) {
10646 		rw_exit(&sc->ioctl_rwl);
10647 		return ENXIO;
10648 	}
10649 	if (err)
10650 		return err;
10651 	s = splnet();
10652 
10653 	switch (cmd) {
10654 	case SIOCSIFADDR:
10655 		ifp->if_flags |= IFF_UP;
10656 		/* FALLTHROUGH */
10657 	case SIOCSIFFLAGS:
10658 		if (ifp->if_flags & IFF_UP) {
10659 			if (!(ifp->if_flags & IFF_RUNNING)) {
10660 				/* Force reload of firmware image from disk. */
10661 				sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10662 				err = iwm_init(ifp);
10663 			}
10664 		} else {
10665 			if (ifp->if_flags & IFF_RUNNING)
10666 				iwm_stop(ifp);
10667 		}
10668 		break;
10669 
10670 	default:
10671 		err = ieee80211_ioctl(ifp, cmd, data);
10672 	}
10673 
10674 	if (err == ENETRESET) {
10675 		err = 0;
10676 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
10677 		    (IFF_UP | IFF_RUNNING)) {
10678 			iwm_stop(ifp);
10679 			err = iwm_init(ifp);
10680 		}
10681 	}
10682 
10683 	splx(s);
10684 	rw_exit(&sc->ioctl_rwl);
10685 
10686 	return err;
10687 }
10688 
10689 /*
10690  * Note: This structure is read from the device with IO accesses,
10691  * and the reading already does the endian conversion. As it is
10692  * read with uint32_t-sized accesses, any members with a different size
10693  * need to be ordered correctly though!
10694  */
10695 struct iwm_error_event_table {
10696 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10697 	uint32_t error_id;		/* type of error */
10698 	uint32_t trm_hw_status0;	/* TRM HW status */
10699 	uint32_t trm_hw_status1;	/* TRM HW status */
10700 	uint32_t blink2;		/* branch link */
10701 	uint32_t ilink1;		/* interrupt link */
10702 	uint32_t ilink2;		/* interrupt link */
10703 	uint32_t data1;		/* error-specific data */
10704 	uint32_t data2;		/* error-specific data */
10705 	uint32_t data3;		/* error-specific data */
10706 	uint32_t bcon_time;		/* beacon timer */
10707 	uint32_t tsf_low;		/* network timestamp function timer */
10708 	uint32_t tsf_hi;		/* network timestamp function timer */
10709 	uint32_t gp1;		/* GP1 timer register */
10710 	uint32_t gp2;		/* GP2 timer register */
10711 	uint32_t fw_rev_type;	/* firmware revision type */
10712 	uint32_t major;		/* uCode version major */
10713 	uint32_t minor;		/* uCode version minor */
10714 	uint32_t hw_ver;		/* HW Silicon version */
10715 	uint32_t brd_ver;		/* HW board version */
10716 	uint32_t log_pc;		/* log program counter */
10717 	uint32_t frame_ptr;		/* frame pointer */
10718 	uint32_t stack_ptr;		/* stack pointer */
10719 	uint32_t hcmd;		/* last host command header */
10720 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
10721 				 * rxtx_flag */
10722 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
10723 				 * host_flag */
10724 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
10725 				 * enc_flag */
10726 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
10727 				 * time_flag */
10728 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
10729 				 * wico interrupt */
10730 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
10731 	uint32_t wait_event;		/* wait event() caller address */
10732 	uint32_t l2p_control;	/* L2pControlField */
10733 	uint32_t l2p_duration;	/* L2pDurationField */
10734 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
10735 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
10736 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
10737 				 * (LMPM_PMG_SEL) */
10738 	uint32_t u_timestamp;	/* date and time of the
10739 				 * firmware compilation */
10740 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
10741 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
10742 
10743 /*
10744  * UMAC error struct - relevant starting from family 8000 chip.
10745  * Note: This structure is read from the device with IO accesses,
10746  * and the reading already does the endian conversion. As it is
10747  * read with u32-sized accesses, any members with a different size
10748  * need to be ordered correctly though!
10749  */
10750 struct iwm_umac_error_event_table {
10751 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
10752 	uint32_t error_id;	/* type of error */
10753 	uint32_t blink1;	/* branch link */
10754 	uint32_t blink2;	/* branch link */
10755 	uint32_t ilink1;	/* interrupt link */
10756 	uint32_t ilink2;	/* interrupt link */
10757 	uint32_t data1;		/* error-specific data */
10758 	uint32_t data2;		/* error-specific data */
10759 	uint32_t data3;		/* error-specific data */
10760 	uint32_t umac_major;
10761 	uint32_t umac_minor;
10762 	uint32_t frame_pointer;	/* core register 27*/
10763 	uint32_t stack_pointer;	/* core register 28 */
10764 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
10765 	uint32_t nic_isr_pref;	/* ISR status register */
10766 } __packed;
10767 
10768 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
10769 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
10770 
10771 void
10772 iwm_nic_umac_error(struct iwm_softc *sc)
10773 {
10774 	struct iwm_umac_error_event_table table;
10775 	uint32_t base;
10776 
10777 	base = sc->sc_uc.uc_umac_error_event_table;
10778 
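	/* A valid UMAC error table pointer must point into device SRAM. */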
10779 	if (base < 0x800000) {
10780 		printf("%s: Invalid error log pointer 0x%08x\n",
10781 		    DEVNAME(sc), base);
10782 		return;
10783 	}
10784 
10785 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10786 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10787 		return;
10788 	}
10789 
10790 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10791 		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10792 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10793 			sc->sc_flags, table.valid);
10794 	}
10795 
10796 	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10797 		iwm_desc_lookup(table.error_id));
10798 	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10799 	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10800 	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10801 	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10802 	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10803 	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10804 	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10805 	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10806 	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10807 	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10808 	    table.frame_pointer);
10809 	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10810 	    table.stack_pointer);
10811 	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10812 	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10813 	    table.nic_isr_pref);
10814 }
10815 
10816 #define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
10817 static struct {
10818 	const char *name;
10819 	uint8_t num;
10820 } advanced_lookup[] = {
10821 	{ "NMI_INTERRUPT_WDG", 0x34 },
10822 	{ "SYSASSERT", 0x35 },
10823 	{ "UCODE_VERSION_MISMATCH", 0x37 },
10824 	{ "BAD_COMMAND", 0x38 },
10825 	{ "BAD_COMMAND", 0x39 },
10826 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
10827 	{ "FATAL_ERROR", 0x3D },
10828 	{ "NMI_TRM_HW_ERR", 0x46 },
10829 	{ "NMI_INTERRUPT_TRM", 0x4C },
10830 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
10831 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
10832 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
10833 	{ "NMI_INTERRUPT_HOST", 0x66 },
10834 	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
10835 	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
10836 	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
10837 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
10838 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
10839 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
10840 	{ "ADVANCED_SYSASSERT", 0 },
10841 };
10842 
10843 const char *
10844 iwm_desc_lookup(uint32_t num)
10845 {
10846 	int i;
10847 
10848 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
10849 		if (advanced_lookup[i].num ==
10850 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
10851 			return advanced_lookup[i].name;
10852 
10853 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
10854 	return advanced_lookup[i].name;
10855 }
10856 
10857 /*
10858  * Support for dumping the error log seemed like a good idea ...
10859  * but it's mostly hex junk and the only sensible thing is the
10860  * hw/ucode revision (which we know anyway).  Since it's here,
10861  * I'll just leave it in, just in case e.g. the Intel guys want to
10862  * help us decipher some "ADVANCED_SYSASSERT" later.
10863  */
10864 void
10865 iwm_nic_error(struct iwm_softc *sc)
10866 {
10867 	struct iwm_error_event_table table;
10868 	uint32_t base;
10869 
10870 	printf("%s: dumping device error log\n", DEVNAME(sc));
10871 	base = sc->sc_uc.uc_error_event_table;
10872 	if (base < 0x800000) {
10873 		printf("%s: Invalid error log pointer 0x%08x\n",
10874 		    DEVNAME(sc), base);
10875 		return;
10876 	}
10877 
10878 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10879 		printf("%s: reading errlog failed\n", DEVNAME(sc));
10880 		return;
10881 	}
10882 
10883 	if (!table.valid) {
10884 		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10885 		return;
10886 	}
10887 
10888 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
10889 		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10890 		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10891 		    sc->sc_flags, table.valid);
10892 	}
10893 
10894 	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10895 	    iwm_desc_lookup(table.error_id));
10896 	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10897 	    table.trm_hw_status0);
10898 	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10899 	    table.trm_hw_status1);
10900 	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10901 	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10902 	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10903 	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10904 	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10905 	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10906 	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10907 	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10908 	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10909 	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10910 	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10911 	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10912 	    table.fw_rev_type);
10913 	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10914 	    table.major);
10915 	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10916 	    table.minor);
10917 	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10918 	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10919 	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10920 	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10921 	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10922 	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10923 	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10924 	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10925 	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10926 	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10927 	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10928 	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10929 	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10930 	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10931 	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10932 	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10933 	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10934 
10935 	if (sc->sc_uc.uc_umac_error_event_table)
10936 		iwm_nic_umac_error(sc);
10937 }
10938 
10939 void
10940 iwm_dump_driver_status(struct iwm_softc *sc)
10941 {
10942 	int i;
10943 
10944 	printf("driver status:\n");
10945 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
10946 		struct iwm_tx_ring *ring = &sc->txq[i];
10947 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
10948 		    "queued=%-3d\n",
10949 		    i, ring->qid, ring->cur, ring->queued);
10950 	}
10951 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
10952 	printf("  802.11 state %s\n",
10953 	    ieee80211_state_name[sc->sc_ic.ic_state]);
10954 }
10955 
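/*
 * Sync the portion of the DMA map which holds a response structure
 * following the packet header, then point the given variable at it.
 */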
10956 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
10957 do {									\
10958 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10959 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
10960 	_var_ = (void *)((_pkt_)+1);					\
10961 } while (/*CONSTCOND*/0)
10962 
10963 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
10964 do {									\
10965 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
10966 	    (_len_), BUS_DMASYNC_POSTREAD);				\
10967 	_ptr_ = (void *)((_pkt_)+1);					\
10968 } while (/*CONSTCOND*/0)
10969 
10970 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
10971 
10972 int
10973 iwm_rx_pkt_valid(struct iwm_rx_packet *pkt)
10974 {
10975 	int qid, idx, code;
10976 
10977 	qid = pkt->hdr.qid & ~0x80;
10978 	idx = pkt->hdr.idx;
10979 	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
10980 
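	/*
	 * An all-zero header or the "frame invalid" marker indicates
	 * unused space in the receive buffer rather than a real packet.
	 */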
10981 	return (!(qid == 0 && idx == 0 && code == 0) &&
10982 	    pkt->len_n_flags != htole32(IWM_FH_RSCSR_FRAME_INVALID));
10983 }
10984 
10985 void
10986 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10987 {
10988 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10989 	struct iwm_rx_packet *pkt, *nextpkt;
10990 	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
10991 	struct mbuf *m0, *m;
10992 	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
10993 	int qid, idx, code, handled = 1;
10994 
10995 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
10996 	    BUS_DMASYNC_POSTREAD);
10997 
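	/*
	 * A single receive buffer may contain several firmware
	 * notifications packed back to back; walk them until the
	 * buffer is exhausted or an invalid packet header is found.
	 */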
10998 	m0 = data->m;
10999 	while (m0 && offset + minsz < IWM_RBUF_SIZE) {
11000 		pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
11001 		qid = pkt->hdr.qid;
11002 		idx = pkt->hdr.idx;
11003 
11004 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
11005 
11006 		if (!iwm_rx_pkt_valid(pkt))
11007 			break;
11008 
11009 		len = sizeof(pkt->len_n_flags) + iwm_rx_packet_len(pkt);
11010 		if (len < minsz || len > (IWM_RBUF_SIZE - offset))
11011 			break;
11012 
11013 		if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
11014 			/* Take mbuf m0 off the RX ring. */
11015 			if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
11016 				ifp->if_ierrors++;
11017 				break;
11018 			}
11019 			KASSERT(data->m != m0);
11020 		}
11021 
11022 		switch (code) {
11023 		case IWM_REPLY_RX_PHY_CMD:
11024 			iwm_rx_rx_phy_cmd(sc, pkt, data);
11025 			break;
11026 
11027 		case IWM_REPLY_RX_MPDU_CMD: {
11028 			size_t maxlen = IWM_RBUF_SIZE - offset - minsz;
11029 			nextoff = offset +
11030 			    roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11031 			nextpkt = (struct iwm_rx_packet *)
11032 			    (m0->m_data + nextoff);
11033 			if (nextoff + minsz >= IWM_RBUF_SIZE ||
11034 			    !iwm_rx_pkt_valid(nextpkt)) {
11035 				/* No need to copy last frame in buffer. */
11036 				if (offset > 0)
11037 					m_adj(m0, offset);
11038 				if (sc->sc_mqrx_supported)
11039 					iwm_rx_mpdu_mq(sc, m0, pkt->data,
11040 					    maxlen, ml);
11041 				else
11042 					iwm_rx_mpdu(sc, m0, pkt->data,
11043 					    maxlen, ml);
11044 				m0 = NULL; /* stack owns m0 now; abort loop */
11045 			} else {
11046 				/*
11047 				 * Create an mbuf which points to the current
11048 				 * packet. Always copy from offset zero to
11049 				 * preserve m_pkthdr.
11050 				 */
11051 				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
11052 				if (m == NULL) {
11053 					ifp->if_ierrors++;
11054 					m_freem(m0);
11055 					m0 = NULL;
11056 					break;
11057 				}
11058 				m_adj(m, offset);
11059 				if (sc->sc_mqrx_supported)
11060 					iwm_rx_mpdu_mq(sc, m, pkt->data,
11061 					    maxlen, ml);
11062 				else
11063 					iwm_rx_mpdu(sc, m, pkt->data,
11064 					    maxlen, ml);
11065 			}
11066 			break;
11067 		}
11068 
11069 		case IWM_TX_CMD:
11070 			iwm_rx_tx_cmd(sc, pkt, data);
11071 			break;
11072 
11073 		case IWM_BA_NOTIF:
11074 			iwm_rx_compressed_ba(sc, pkt);
11075 			break;
11076 
11077 		case IWM_MISSED_BEACONS_NOTIFICATION:
11078 			iwm_rx_bmiss(sc, pkt, data);
11079 			break;
11080 
11081 		case IWM_MFUART_LOAD_NOTIFICATION:
11082 			break;
11083 
11084 		case IWM_ALIVE: {
11085 			struct iwm_alive_resp_v1 *resp1;
11086 			struct iwm_alive_resp_v2 *resp2;
11087 			struct iwm_alive_resp_v3 *resp3;
11088 
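			/*
			 * The layout of the alive response depends on the
			 * firmware version; the variants are told apart by
			 * their payload size.
			 */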
11089 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
11090 				SYNC_RESP_STRUCT(resp1, pkt);
11091 				sc->sc_uc.uc_error_event_table
11092 				    = le32toh(resp1->error_event_table_ptr);
11093 				sc->sc_uc.uc_log_event_table
11094 				    = le32toh(resp1->log_event_table_ptr);
11095 				sc->sched_base = le32toh(resp1->scd_base_ptr);
11096 				if (resp1->status == IWM_ALIVE_STATUS_OK)
11097 					sc->sc_uc.uc_ok = 1;
11098 				else
11099 					sc->sc_uc.uc_ok = 0;
11100 			}
11101 
11102 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
11103 				SYNC_RESP_STRUCT(resp2, pkt);
11104 				sc->sc_uc.uc_error_event_table
11105 				    = le32toh(resp2->error_event_table_ptr);
11106 				sc->sc_uc.uc_log_event_table
11107 				    = le32toh(resp2->log_event_table_ptr);
11108 				sc->sched_base = le32toh(resp2->scd_base_ptr);
11109 				sc->sc_uc.uc_umac_error_event_table
11110 				    = le32toh(resp2->error_info_addr);
11111 				if (resp2->status == IWM_ALIVE_STATUS_OK)
11112 					sc->sc_uc.uc_ok = 1;
11113 				else
11114 					sc->sc_uc.uc_ok = 0;
11115 			}
11116 
11117 			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
11118 				SYNC_RESP_STRUCT(resp3, pkt);
11119 				sc->sc_uc.uc_error_event_table
11120 				    = le32toh(resp3->error_event_table_ptr);
11121 				sc->sc_uc.uc_log_event_table
11122 				    = le32toh(resp3->log_event_table_ptr);
11123 				sc->sched_base = le32toh(resp3->scd_base_ptr);
11124 				sc->sc_uc.uc_umac_error_event_table
11125 				    = le32toh(resp3->error_info_addr);
11126 				if (resp3->status == IWM_ALIVE_STATUS_OK)
11127 					sc->sc_uc.uc_ok = 1;
11128 				else
11129 					sc->sc_uc.uc_ok = 0;
11130 			}
11131 
11132 			sc->sc_uc.uc_intr = 1;
11133 			wakeup(&sc->sc_uc);
11134 			break;
11135 		}
11136 
11137 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
11138 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
11139 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
11140 			iwm_phy_db_set_section(sc, phy_db_notif);
11141 			sc->sc_init_complete |= IWM_CALIB_COMPLETE;
11142 			wakeup(&sc->sc_init_complete);
11143 			break;
11144 		}
11145 
11146 		case IWM_STATISTICS_NOTIFICATION: {
11147 			struct iwm_notif_statistics *stats;
11148 			SYNC_RESP_STRUCT(stats, pkt);
11149 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
11150 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
11151 			break;
11152 		}
11153 
11154 		case IWM_MCC_CHUB_UPDATE_CMD: {
11155 			struct iwm_mcc_chub_notif *notif;
11156 			SYNC_RESP_STRUCT(notif, pkt);
11157 			iwm_mcc_update(sc, notif);
11158 			break;
11159 		}
11160 
11161 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
11162 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11163 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
11164 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11165 				 IWM_TEMP_REPORTING_THRESHOLDS_CMD):
11166 			break;
11167 
11168 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
11169 		    IWM_CT_KILL_NOTIFICATION): {
11170 			struct iwm_ct_kill_notif *notif;
11171 			SYNC_RESP_STRUCT(notif, pkt);
11172 			printf("%s: device at critical temperature (%u degC), "
11173 			    "stopping device\n",
11174 			    DEVNAME(sc), le16toh(notif->temperature));
11175 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11176 			task_add(systq, &sc->init_task);
11177 			break;
11178 		}
11179 
11180 		case IWM_ADD_STA_KEY:
11181 		case IWM_PHY_CONFIGURATION_CMD:
11182 		case IWM_TX_ANT_CONFIGURATION_CMD:
11183 		case IWM_ADD_STA:
11184 		case IWM_MAC_CONTEXT_CMD:
11185 		case IWM_REPLY_SF_CFG_CMD:
11186 		case IWM_POWER_TABLE_CMD:
11187 		case IWM_LTR_CONFIG:
11188 		case IWM_PHY_CONTEXT_CMD:
11189 		case IWM_BINDING_CONTEXT_CMD:
11190 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
11191 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
11192 		case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
11193 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
11194 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
11195 		case IWM_REPLY_BEACON_FILTERING_CMD:
11196 		case IWM_MAC_PM_POWER_TABLE:
11197 		case IWM_TIME_QUOTA_CMD:
11198 		case IWM_REMOVE_STA:
11199 		case IWM_TXPATH_FLUSH:
11200 		case IWM_LQ_CMD:
11201 		case IWM_WIDE_ID(IWM_LONG_GROUP,
11202 				 IWM_FW_PAGING_BLOCK_CMD):
11203 		case IWM_BT_CONFIG:
11204 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
11205 		case IWM_NVM_ACCESS_CMD:
11206 		case IWM_MCC_UPDATE_CMD:
11207 		case IWM_TIME_EVENT_CMD: {
11208 			size_t pkt_len;
11209 
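			/*
			 * These are responses to synchronous host commands.
			 * Copy the response into the buffer pre-allocated by
			 * the command submission path for the waiting caller,
			 * unless the response is malformed or too large.
			 */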
11210 			if (sc->sc_cmd_resp_pkt[idx] == NULL)
11211 				break;
11212 
11213 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
11214 			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11215 
11216 			pkt_len = sizeof(pkt->len_n_flags) +
11217 			    iwm_rx_packet_len(pkt);
11218 
11219 			if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
11220 			    pkt_len < sizeof(*pkt) ||
11221 			    pkt_len > sc->sc_cmd_resp_len[idx]) {
11222 				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
11223 				    sc->sc_cmd_resp_len[idx]);
11224 				sc->sc_cmd_resp_pkt[idx] = NULL;
11225 				break;
11226 			}
11227 
11228 			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
11229 			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
11230 			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
11231 			break;
11232 		}
11233 
11234 		/* ignore */
11235 		case IWM_PHY_DB_CMD:
11236 			break;
11237 
11238 		case IWM_INIT_COMPLETE_NOTIF:
11239 			sc->sc_init_complete |= IWM_INIT_COMPLETE;
11240 			wakeup(&sc->sc_init_complete);
11241 			break;
11242 
11243 		case IWM_SCAN_OFFLOAD_COMPLETE: {
11244 			struct iwm_periodic_scan_complete *notif;
11245 			SYNC_RESP_STRUCT(notif, pkt);
11246 			break;
11247 		}
11248 
11249 		case IWM_SCAN_ITERATION_COMPLETE: {
11250 			struct iwm_lmac_scan_complete_notif *notif;
11251 			SYNC_RESP_STRUCT(notif, pkt);
11252 			iwm_endscan(sc);
11253 			break;
11254 		}
11255 
11256 		case IWM_SCAN_COMPLETE_UMAC: {
11257 			struct iwm_umac_scan_complete *notif;
11258 			SYNC_RESP_STRUCT(notif, pkt);
11259 			iwm_endscan(sc);
11260 			break;
11261 		}
11262 
11263 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
11264 			struct iwm_umac_scan_iter_complete_notif *notif;
11265 			SYNC_RESP_STRUCT(notif, pkt);
11266 			iwm_endscan(sc);
11267 			break;
11268 		}
11269 
11270 		case IWM_REPLY_ERROR: {
11271 			struct iwm_error_resp *resp;
11272 			SYNC_RESP_STRUCT(resp, pkt);
11273 			printf("%s: firmware error 0x%x, cmd 0x%x\n",
11274 				DEVNAME(sc), le32toh(resp->error_type),
11275 				resp->cmd_id);
11276 			break;
11277 		}
11278 
11279 		case IWM_TIME_EVENT_NOTIFICATION: {
11280 			struct iwm_time_event_notif *notif;
11281 			uint32_t action;
11282 			SYNC_RESP_STRUCT(notif, pkt);
11283 
11284 			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
11285 				break;
11286 			action = le32toh(notif->action);
11287 			if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
11288 				sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
11289 			break;
11290 		}
11291 
11292 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
11293 		    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
11294 		    break;
11295 
11296 		/*
11297 		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
11298 		 * messages. Just ignore them for now.
11299 		 */
11300 		case IWM_DEBUG_LOG_MSG:
11301 			break;
11302 
11303 		case IWM_MCAST_FILTER_CMD:
11304 			break;
11305 
11306 		case IWM_SCD_QUEUE_CFG: {
11307 			struct iwm_scd_txq_cfg_rsp *rsp;
11308 			SYNC_RESP_STRUCT(rsp, pkt);
11309 
11310 			break;
11311 		}
11312 
11313 		case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
11314 			break;
11315 
11316 		case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
11317 			break;
11318 
11319 		default:
11320 			handled = 0;
11321 			printf("%s: unhandled firmware response 0x%x/0x%x "
11322 			    "rx ring %d[%d]\n",
11323 			    DEVNAME(sc), code, pkt->len_n_flags,
11324 			    (qid & ~0x80), idx);
11325 			break;
11326 		}
11327 
11328 		/*
11329 		 * uCode sets bit 0x80 when it originates the notification,
11330 		 * i.e. when the notification is not a direct response to a
11331 		 * command sent by the driver.
11332 		 * For example, uCode issues IWM_REPLY_RX when it sends a
11333 		 * received frame to the driver.
11334 		 */
11335 		if (handled && !(qid & (1 << 7))) {
11336 			iwm_cmd_done(sc, qid, idx, code);
11337 		}
11338 
11339 		offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
11340 	}
11341 
11342 	if (m0 && m0 != data->m)
11343 		m_freem(m0);
11344 }
11345 
11346 void
11347 iwm_notif_intr(struct iwm_softc *sc)
11348 {
11349 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
11350 	uint32_t wreg;
11351 	uint16_t hw;
11352 	int count;
11353 
11354 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
11355 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
11356 
11357 	if (sc->sc_mqrx_supported) {
11358 		count = IWM_RX_MQ_RING_COUNT;
11359 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
11360 	} else {
11361 		count = IWM_RX_RING_COUNT;
11362 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
11363 	}
11364 
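	/*
	 * closed_rb_num is the hardware's current write pointer into
	 * the RX ring; process buffers until our read pointer catches
	 * up with it.
	 */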
11365 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
11366 	hw &= (count - 1);
11367 	while (sc->rxq.cur != hw) {
11368 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
11369 		iwm_rx_pkt(sc, data, &ml);
11370 		ADVANCE_RXQ(sc);
11371 	}
11372 	if_input(&sc->sc_ic.ic_if, &ml);
11373 
11374 	/*
11375 	 * Tell the firmware what we have processed.
11376 	 * Seems like the hardware gets upset unless we align the write by 8??
11377 	 */
11378 	hw = (hw == 0) ? count - 1 : hw - 1;
11379 	IWM_WRITE(sc, wreg, hw & ~7);
11380 }
11381 
11382 int
11383 iwm_intr(void *arg)
11384 {
11385 	struct iwm_softc *sc = arg;
11386 	struct ieee80211com *ic = &sc->sc_ic;
11387 	struct ifnet *ifp = IC2IFP(ic);
11388 	int handled = 0;
11389 	int rv = 0;
11390 	uint32_t r1, r2;
11391 
11392 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
11393 
11394 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
11395 		uint32_t *ict = sc->ict_dma.vaddr;
11396 		int tmp;
11397 
11398 		tmp = htole32(ict[sc->ict_cur]);
11399 		if (!tmp)
11400 			goto out_ena;
11401 
11402 		/*
11403 		 * ok, there was something.  keep plowing until we have all.
11404 		 */
11405 		r1 = r2 = 0;
11406 		while (tmp) {
11407 			r1 |= tmp;
11408 			ict[sc->ict_cur] = 0;
11409 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
11410 			tmp = htole32(ict[sc->ict_cur]);
11411 		}
11412 
11413 		/* this is where the fun begins.  don't ask */
11414 		if (r1 == 0xffffffff)
11415 			r1 = 0;
11416 
11417 		/*
11418 		 * Workaround for hardware bug where bits are falsely cleared
11419 		 * when using interrupt coalescing.  Bit 15 should be set if
11420 		 * bits 18 and 19 are set.
11421 		 */
11422 		if (r1 & 0xc0000)
11423 			r1 |= 0x8000;
11424 
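		/*
		 * The ICT table records only bits 0-7 and 24-31 of the
		 * interrupt cause register, packed into the two low bytes;
		 * expand the second byte back into bits 24-31.
		 */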
11425 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
11426 	} else {
11427 		r1 = IWM_READ(sc, IWM_CSR_INT);
11428 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
11429 	}
11430 	if (r1 == 0 && r2 == 0) {
11431 		goto out_ena;
11432 	}
11433 	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
11434 		goto out;
11435 
11436 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
11437 
11438 	/* ignored */
11439 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
11440 
11441 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
11442 		handled |= IWM_CSR_INT_BIT_RF_KILL;
11443 		iwm_check_rfkill(sc);
11444 		task_add(systq, &sc->init_task);
11445 		rv = 1;
11446 		goto out_ena;
11447 	}
11448 
11449 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
11450 		if (ifp->if_flags & IFF_DEBUG) {
11451 			iwm_nic_error(sc);
11452 			iwm_dump_driver_status(sc);
11453 		}
11454 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11455 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11456 			task_add(systq, &sc->init_task);
11457 		rv = 1;
11458 		goto out;
11459 
11460 	}
11461 
11462 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
11463 		handled |= IWM_CSR_INT_BIT_HW_ERR;
11464 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11465 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11466 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11467 			task_add(systq, &sc->init_task);
11468 		}
11469 		rv = 1;
11470 		goto out;
11471 	}
11472 
11473 	/* firmware chunk loaded */
11474 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
11475 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
11476 		handled |= IWM_CSR_INT_BIT_FH_TX;
11477 
11478 		sc->sc_fw_chunk_done = 1;
11479 		wakeup(&sc->sc_fw);
11480 	}
11481 
11482 	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
11483 	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
11484 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
11485 			handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
11486 			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
11487 		}
11488 		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
11489 			handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
11490 			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
11491 		}
11492 
11493 		/* Disable periodic interrupt; we use it as just a one-shot. */
11494 		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
11495 
11496 		/*
11497 		 * Enable periodic interrupt in 8 msec only if we received
11498 		 * real RX interrupt (instead of just periodic int), to catch
11499 		 * any dangling Rx interrupt.  If it was just the periodic
11500 		 * interrupt, there was no dangling Rx activity, and no need
11501 		 * to extend the periodic interrupt; one-shot is enough.
11502 		 */
11503 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
11504 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
11505 			    IWM_CSR_INT_PERIODIC_ENA);
11506 
11507 		iwm_notif_intr(sc);
11508 	}
11509 
11510 	rv = 1;
11511 
11512  out_ena:
11513 	iwm_restore_interrupts(sc);
11514  out:
11515 	return rv;
11516 }
11517 
11518 int
11519 iwm_intr_msix(void *arg)
11520 {
11521 	struct iwm_softc *sc = arg;
11522 	struct ieee80211com *ic = &sc->sc_ic;
11523 	struct ifnet *ifp = IC2IFP(ic);
11524 	uint32_t inta_fh, inta_hw;
11525 	int vector = 0;
11526 
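	/*
	 * MSI-X splits interrupt causes across two registers: FH (DMA)
	 * causes and HW causes. Writing the read values back
	 * acknowledges the pending causes.
	 */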
11527 	inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11528 	inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11529 	IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11530 	IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11531 	inta_fh &= sc->sc_fh_mask;
11532 	inta_hw &= sc->sc_hw_mask;
11533 
11534 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_Q0 ||
11535 	    inta_fh & IWM_MSIX_FH_INT_CAUSES_Q1) {
11536 		iwm_notif_intr(sc);
11537 	}
11538 
11539 	/* firmware chunk loaded */
11540 	if (inta_fh & IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
11541 		sc->sc_fw_chunk_done = 1;
11542 		wakeup(&sc->sc_fw);
11543 	}
11544 
11545 	if ((inta_fh & IWM_MSIX_FH_INT_CAUSES_FH_ERR) ||
11546 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
11547 	    (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
11548 		if (ifp->if_flags & IFF_DEBUG) {
11549 			iwm_nic_error(sc);
11550 			iwm_dump_driver_status(sc);
11551 		}
11552 		printf("%s: fatal firmware error\n", DEVNAME(sc));
11553 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11554 			task_add(systq, &sc->init_task);
11555 		return 1;
11556 	}
11557 
11558 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
11559 		iwm_check_rfkill(sc);
11560 		task_add(systq, &sc->init_task);
11561 	}
11562 
11563 	if (inta_hw & IWM_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
11564 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11565 		if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11566 			sc->sc_flags |= IWM_FLAG_HW_ERR;
11567 			task_add(systq, &sc->init_task);
11568 		}
11569 		return 1;
11570 	}
11571 
11572 	/*
11573 	 * Before raising the interrupt the HW disables it to prevent
11574 	 * a nested interrupt. This is done by writing 1 to the
11575 	 * corresponding bit in the mask register. After handling the
11576 	 * interrupt, it should be re-enabled by clearing this bit.
11577 	 * The register is defined as write 1 clear (W1C), meaning that
11578 	 * a bit is cleared by writing 1 to it.
11579 	 */
11580 	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11581 	return 1;
11582 }
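
/*
 * Editor's note, a hedged sketch rather than driver code: because
 * IWM_CSR_MSIX_AUTOMASK_ST_AD is write-1-clear, unmasking the serviced
 * vector is a single store and needs no read-modify-write cycle:
 *
 *	IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
 *
 * Bits written as 0 are ignored, so any other vectors that are still
 * auto-masked stay masked.
 */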
11583 
11584 typedef void *iwm_match_t;
11585 
11586 static const struct pci_matchid iwm_devices[] = {
11587 #ifdef __FreeBSD_version
11588 #define	PCI_VENDOR_INTEL			0x8086
11589 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
11590 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
11591 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
11592 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
11593 #define	PCI_PRODUCT_INTEL_WL_3168_1	0x24fb
11594 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
11595 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
11596 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
11597 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
11598 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
11599 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
11600 #define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
11601 #define	PCI_PRODUCT_INTEL_WL_9560_1	0x9df0
11602 #define	PCI_PRODUCT_INTEL_WL_9560_2	0xa370
11603 #define	PCI_PRODUCT_INTEL_WL_9560_3	0x31dc
11604 #define	PCI_PRODUCT_INTEL_WL_9260_1	0x2526
11605 #endif
11606 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
11607 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
11608 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
11609 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
11610 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
11611 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
11612 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
11613 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
11614 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
11615 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
11616 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
11617 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
11618 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9260_1 },
11619 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_1 },
11620 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_2 },
11621 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_9560_3 },
11622 };
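
/*
 * Editor's illustration (a sketch, assuming struct pci_matchid exposes
 * pm_vid/pm_pid as the FreeBSD probe below does): matching a device
 * against the table above amounts to a linear scan, roughly:
 *
 *	for (i = 0; i < nitems(iwm_devices); i++)
 *		if (PCI_VENDOR(pa->pa_id) == iwm_devices[i].pm_vid &&
 *		    PCI_PRODUCT(pa->pa_id) == iwm_devices[i].pm_pid)
 *			return 1;	// supported adapter
 *	return 0;			// not ours
 */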
11623 
11624 #ifdef __FreeBSD_version
11625 static int
11626 iwm_probe(device_t dev)
11627 {
11628 	int i;
11629 
11630 	for (i = 0; i < nitems(iwm_devices); i++) {
11631 		if (pci_get_vendor(dev) == iwm_devices[i].pm_vid &&
11632 			pci_get_device(dev) == iwm_devices[i].pm_pid) {
11633 			return (BUS_PROBE_DEFAULT);
11634 		}
11635 	}
11636 
11637 	return (ENXIO);
11638 }
11639 #else
11640 int
11641 iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
11642 {
11643 	return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
11644 	    nitems(iwm_devices));
11645 }
11646 #endif
11647 
11648 int
11649 iwm_preinit(struct iwm_softc *sc)
11650 {
11651 	struct ieee80211com *ic = &sc->sc_ic;
11652 	struct ifnet *ifp = IC2IFP(ic);
11653 	int err;
11654 
11655 	err = iwm_prepare_card_hw(sc);
11656 	if (err) {
11657 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11658 		return err;
11659 	}
11660 
11661 	if (sc->attached) {
11662 #ifndef __FreeBSD_version
11663 		/* Update MAC in case the upper layers changed it. */
11664 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11665 		    ((struct arpcom *)ifp)->ac_enaddr);
11666 #endif
11667 		return 0;
11668 	}
11669 
11670 	err = iwm_start_hw(sc);
11671 	if (err) {
11672 		printf("%s: could not initialize hardware\n", DEVNAME(sc));
11673 		return err;
11674 	}
11675 
11676 	err = iwm_run_init_mvm_ucode(sc, 1);
11677 	iwm_stop_device(sc);
11678 	if (err)
11679 		return err;
11680 
11681 	/* Print version info and MAC address on first successful fw load. */
11682 	sc->attached = 1;
11683 	printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
11684 	    DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11685 	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11686 
11687 	if (sc->sc_nvm.sku_cap_11n_enable)
11688 		iwm_setup_ht_rates(sc);
11689 
11690 	/* Not all hardware supports the 5 GHz band. */
11691 	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11692 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
11693 		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
11694 
11695 	/* Configure channel information obtained from firmware. */
11696 	ieee80211_channel_init(ifp);
11697 
11698 #ifdef __HAIKU__
11699 	IEEE80211_ADDR_COPY(IF_LLADDR(ifp), ic->ic_myaddr);
11700 #else
11701 	/* Configure MAC address. */
11702 	err = if_setlladdr(ifp, ic->ic_myaddr);
11703 	if (err)
11704 		printf("%s: could not set MAC address (error %d)\n",
11705 		    DEVNAME(sc), err);
11706 #endif
11707 
11708 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
11709 
11710 	return 0;
11711 }
11712 
11713 void
11714 iwm_attach_hook(struct device *self)
11715 {
11716 	struct iwm_softc *sc = (void *)self;
11717 
11718 	KASSERT(!cold);
11719 
11720 	iwm_preinit(sc);
11721 }
11722 
11723 #ifdef __FreeBSD_version
11724 static int
11725 iwm_attach(device_t dev)
11726 #else
11727 void
11728 iwm_attach(struct device *parent, struct device *self, void *aux)
11729 #endif
11730 {
11731 #ifdef __FreeBSD_version
11732 #define pa dev
11733 	struct iwm_softc *sc = device_get_softc(dev);
11734 #else
11735 	struct iwm_softc *sc = (void *)self;
11736 	struct pci_attach_args *pa = aux;
11737 #endif
11738 	pci_intr_handle_t ih;
11739 	pcireg_t reg, memtype;
11740 	struct ieee80211com *ic = &sc->sc_ic;
11741 	struct ifnet *ifp = &ic->ic_if;
11742 	const char *intrstr;
11743 	int err;
11744 	int txq_i, i, j;
11745 
11746 #ifdef __FreeBSD_version
11747 	sc->sc_dev = dev;
11748 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
11749 	bus_dma_tag_create(sc->sc_dmat, 1, 0,
11750 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
11751 		BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED,
11752 		BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_dmat);
11753 	pci_enable_busmaster(sc->sc_dev);
11754 
11755 	if_alloc_inplace(ifp, IFT_ETHER);
11756 #else
11757 	sc->sc_pct = pa->pa_pc;
11758 	sc->sc_pcitag = pa->pa_tag;
11759 	sc->sc_dmat = pa->pa_dmat;
11760 #endif
11761 
11762 	rw_init(&sc->ioctl_rwl, "iwmioctl");
11763 
11764 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11765 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11766 	if (err == 0) {
11767 		printf("%s: PCIe capability structure not found!\n",
11768 		    DEVNAME(sc));
11769 		goto fail;
11770 	}
11771 
11772 	/*
11773 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11774 	 * PCI Tx retries from interfering with C3 CPU state.
11775 	 */
11776 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11777 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11778 
11779 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11780 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11781 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11782 	if (err) {
11783 		printf("%s: can't map mem space\n", DEVNAME(sc));
11784 		goto fail;
11785 	}
11786 
11787 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11788 		sc->sc_msix = 1;
11789 	} else if (pci_intr_map_msi(pa, &ih)) {
11790 #ifndef __HAIKU__
11791 		if (pci_intr_map(pa, &ih)) {
11792 #else
11793 		{
11794 #endif
11795 			printf("%s: can't map interrupt\n", DEVNAME(sc));
11796 			goto fail;
11797 		}
11798 		/* Hardware bug workaround. */
11799 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11800 		    PCI_COMMAND_STATUS_REG);
11801 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11802 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11803 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11804 		    PCI_COMMAND_STATUS_REG, reg);
11805 	}
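
	/*
	 * Editor's note: the mapping above falls back in order of
	 * preference: MSI-X first, then MSI, then (outside of Haiku, which
	 * has no legacy INTx path here) pin-based interrupts.  A minimal
	 * sketch of the same cascade:
	 *
	 *	if (pci_intr_map_msix(pa, 0, &ih) == 0)
	 *		sc->sc_msix = 1;		// best case
	 *	else if (pci_intr_map_msi(pa, &ih) == 0)
	 *		;				// MSI fallback
	 *	else if (pci_intr_map(pa, &ih) != 0)
	 *		goto fail;			// INTx failed too
	 */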
11806 
11807 	intrstr = pci_intr_string(sc->sc_pct, ih);
11808 	if (sc->sc_msix)
11809 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11810 		    iwm_intr_msix, sc, DEVNAME(sc));
11811 	else
11812 		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11813 		    iwm_intr, sc, DEVNAME(sc));
11814 
11815 	if (sc->sc_ih == NULL) {
11816 		printf("\n");
11817 		printf("%s: can't establish interrupt", DEVNAME(sc));
11818 		if (intrstr != NULL)
11819 			printf(" at %s", intrstr);
11820 		printf("\n");
11821 		goto fail;
11822 	}
11823 	printf(", %s\n", intrstr);
11824 
11825 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
11826 #ifdef __FreeBSD_version
11827 #undef PCI_PRODUCT
11828 #define PCI_PRODUCT(pa) pci_get_device(dev)
11829 #endif
11830 	switch (PCI_PRODUCT(pa->pa_id)) {
11831 	case PCI_PRODUCT_INTEL_WL_3160_1:
11832 	case PCI_PRODUCT_INTEL_WL_3160_2:
11833 		sc->sc_fwname = "iwm-3160-17";
11834 		sc->host_interrupt_operation_mode = 1;
11835 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11836 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11837 		sc->sc_nvm_max_section_size = 16384;
11838 		sc->nvm_type = IWM_NVM;
11839 		break;
11840 	case PCI_PRODUCT_INTEL_WL_3165_1:
11841 	case PCI_PRODUCT_INTEL_WL_3165_2:
11842 		sc->sc_fwname = "iwm-7265D-29";
11843 		sc->host_interrupt_operation_mode = 0;
11844 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11845 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11846 		sc->sc_nvm_max_section_size = 16384;
11847 		sc->nvm_type = IWM_NVM;
11848 		break;
11849 	case PCI_PRODUCT_INTEL_WL_3168_1:
11850 		sc->sc_fwname = "iwm-3168-29";
11851 		sc->host_interrupt_operation_mode = 0;
11852 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11853 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11854 		sc->sc_nvm_max_section_size = 16384;
11855 		sc->nvm_type = IWM_NVM_SDP;
11856 		break;
11857 	case PCI_PRODUCT_INTEL_WL_7260_1:
11858 	case PCI_PRODUCT_INTEL_WL_7260_2:
11859 		sc->sc_fwname = "iwm-7260-17";
11860 		sc->host_interrupt_operation_mode = 1;
11861 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11862 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11863 		sc->sc_nvm_max_section_size = 16384;
11864 		sc->nvm_type = IWM_NVM;
11865 		break;
11866 	case PCI_PRODUCT_INTEL_WL_7265_1:
11867 	case PCI_PRODUCT_INTEL_WL_7265_2:
11868 		sc->sc_fwname = "iwm-7265-17";
11869 		sc->host_interrupt_operation_mode = 0;
11870 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11871 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11872 		sc->sc_nvm_max_section_size = 16384;
11873 		sc->nvm_type = IWM_NVM;
11874 		break;
11875 	case PCI_PRODUCT_INTEL_WL_8260_1:
11876 	case PCI_PRODUCT_INTEL_WL_8260_2:
11877 		sc->sc_fwname = "iwm-8000C-36";
11878 		sc->host_interrupt_operation_mode = 0;
11879 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11880 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11881 		sc->sc_nvm_max_section_size = 32768;
11882 		sc->nvm_type = IWM_NVM_EXT;
11883 		break;
11884 	case PCI_PRODUCT_INTEL_WL_8265_1:
11885 		sc->sc_fwname = "iwm-8265-36";
11886 		sc->host_interrupt_operation_mode = 0;
11887 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11888 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11889 		sc->sc_nvm_max_section_size = 32768;
11890 		sc->nvm_type = IWM_NVM_EXT;
11891 		break;
11892 	case PCI_PRODUCT_INTEL_WL_9260_1:
11893 		sc->sc_fwname = "iwm-9260-46";
11894 		sc->host_interrupt_operation_mode = 0;
11895 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11896 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11897 		sc->sc_nvm_max_section_size = 32768;
11898 		sc->sc_mqrx_supported = 1;
11899 		break;
11900 	case PCI_PRODUCT_INTEL_WL_9560_1:
11901 	case PCI_PRODUCT_INTEL_WL_9560_2:
11902 	case PCI_PRODUCT_INTEL_WL_9560_3:
11903 		sc->sc_fwname = "iwm-9000-46";
11904 		sc->host_interrupt_operation_mode = 0;
11905 		sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11906 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11907 		sc->sc_nvm_max_section_size = 32768;
11908 		sc->sc_mqrx_supported = 1;
11909 		sc->sc_integrated = 1;
11910 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_WL_9560_3) {
11911 			sc->sc_xtal_latency = 670;
11912 			sc->sc_extra_phy_config = IWM_FW_PHY_CFG_SHARED_CLK;
11913 		} else
11914 			sc->sc_xtal_latency = 650;
11915 		break;
11916 	default:
11917 		printf("%s: unknown adapter type\n", DEVNAME(sc));
11918 		goto fail;
11919 	}
11920 #ifdef __FreeBSD_version
11921 #undef PCI_PRODUCT
11922 #endif
11923 
11924 	/*
11925 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
11926 	 * changed: the revision step now also includes bits 0-1 (there is no
11927 	 * more "dash" value).  To keep hw_rev backwards compatible, we store
11928 	 * it in the old format.
11929 	 */
11930 	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
11931 		uint32_t hw_step;
11932 
11933 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11934 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11935 
11936 		if (iwm_prepare_card_hw(sc) != 0) {
11937 			printf("%s: could not initialize hardware\n",
11938 			    DEVNAME(sc));
11939 			goto fail;
11940 		}
11941 
11942 		/*
11943 		 * To recognize the C step, the driver should read the chip
11944 		 * version ID located at the AUX bus MISC address.
11945 		 */
11946 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
11947 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
11948 		DELAY(2);
11949 
11950 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
11951 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11952 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
11953 				   25000);
11954 		if (!err) {
11955 			printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
11956 			goto fail;
11957 		}
11958 
11959 		if (iwm_nic_lock(sc)) {
11960 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
11961 			hw_step |= IWM_ENABLE_WFPM;
11962 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
11963 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
11964 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
11965 			if (hw_step == 0x3)
11966 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
11967 						(IWM_SILICON_C_STEP << 2);
11968 			iwm_nic_unlock(sc);
11969 		} else {
11970 			printf("%s: Failed to lock the nic\n", DEVNAME(sc));
11971 			goto fail;
11972 		}
11973 	}
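
	/*
	 * Worked example (editor's sketch): on an 8000-family chip the
	 * new-format CSR_HW_REV carries the step in bits 0-1; the rewrite
	 * above repacks it into the pre-8000 layout with the step in
	 * bits 2-3, i.e. roughly
	 *
	 *	hw_rev = (hw_rev & 0xfff0) | (step << 2);
	 *
	 * and the AUX-bus probe then patches bits 2-3 to IWM_SILICON_C_STEP
	 * when the MISC register reports step 0x3.
	 */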
11974 
11975 	/*
11976 	 * Allocate DMA memory for firmware transfers.
11977 	 * Must be aligned on a 16-byte boundary.
11978 	 */
11979 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
11980 	    sc->sc_fwdmasegsz, 16);
11981 	if (err) {
11982 		printf("%s: could not allocate memory for firmware\n",
11983 		    DEVNAME(sc));
11984 		goto fail;
11985 	}
11986 
11987 	/* Allocate "Keep Warm" page, used internally by the card. */
11988 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
11989 	if (err) {
11990 		printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
11991 		goto fail1;
11992 	}
11993 
11994 	/* Allocate interrupt cause table (ICT). */
11995 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11996 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
11997 	if (err) {
11998 		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11999 		goto fail2;
12000 	}
12001 
12002 	/* TX scheduler rings must be aligned on a 1KB boundary. */
12003 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
12004 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
12005 	if (err) {
12006 		printf("%s: could not allocate TX scheduler rings\n",
12007 		    DEVNAME(sc));
12008 		goto fail3;
12009 	}
12010 
12011 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
12012 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
12013 		if (err) {
12014 			printf("%s: could not allocate TX ring %d\n",
12015 			    DEVNAME(sc), txq_i);
12016 			goto fail4;
12017 		}
12018 	}
12019 
12020 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
12021 	if (err) {
12022 		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
12023 		goto fail4;
12024 	}
12025 
12026 	sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
12027 	if (sc->sc_nswq == NULL)
12028 		goto fail4;
12029 
12030 	/* Clear pending interrupts. */
12031 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
12032 
12033 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
12034 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
12035 	ic->ic_state = IEEE80211_S_INIT;
12036 
12037 	/* Set device capabilities. */
12038 	ic->ic_caps =
12039 	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
12040 	    IEEE80211_C_WEP |		/* WEP */
12041 	    IEEE80211_C_RSN |		/* WPA/RSN */
12042 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
12043 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
12044 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
12045 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
12046 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
12047 
12048 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
12049 	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
12050 	ic->ic_htcaps |=
12051 	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
12052 	ic->ic_htxcaps = 0;
12053 	ic->ic_txbfcaps = 0;
12054 	ic->ic_aselcaps = 0;
12055 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
12056 
12057 	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
12058 	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
12059 	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
12060 	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
12061 	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
12062 	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
12063 
12064 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
12065 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
12066 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
12067 
12068 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
12069 		sc->sc_phyctxt[i].id = i;
12070 		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
12071 		sc->sc_phyctxt[i].vht_chan_width =
12072 		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
12073 	}
12074 
12075 	sc->sc_amrr.amrr_min_success_threshold =  1;
12076 	sc->sc_amrr.amrr_max_success_threshold = 15;
12077 
12078 	/* IBSS channel undefined for now. */
12079 	ic->ic_ibss_chan = &ic->ic_channels[1];
12080 
12081 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
12082 
12083 	ifp->if_softc = sc;
12084 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
12085 	ifp->if_ioctl = iwm_ioctl;
12086 	ifp->if_start = iwm_start;
12087 	ifp->if_watchdog = iwm_watchdog;
12088 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
12089 
12090 	if_attach(ifp);
12091 	ieee80211_ifattach(ifp);
12092 	ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
12093 
12094 #if NBPFILTER > 0
12095 	iwm_radiotap_attach(sc);
12096 #endif
12097 	timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
12098 	timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
12099 	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
12100 		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
12101 		rxba->baid = IWM_RX_REORDER_DATA_INVALID_BAID;
12102 		rxba->sc = sc;
12103 		timeout_set(&rxba->session_timer, iwm_rx_ba_session_expired,
12104 		    rxba);
12105 		timeout_set(&rxba->reorder_buf.reorder_timer,
12106 		    iwm_reorder_timer_expired, &rxba->reorder_buf);
12107 		for (j = 0; j < nitems(rxba->entries); j++)
12108 			ml_init(&rxba->entries[j].frames);
12109 	}
12110 	task_set(&sc->init_task, iwm_init_task, sc);
12111 	task_set(&sc->newstate_task, iwm_newstate_task, sc);
12112 	task_set(&sc->ba_task, iwm_ba_task, sc);
12113 	task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
12114 	task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
12115 	task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);
12116 
12117 	ic->ic_node_alloc = iwm_node_alloc;
12118 	ic->ic_bgscan_start = iwm_bgscan;
12119 	ic->ic_bgscan_done = iwm_bgscan_done;
12120 	ic->ic_set_key = iwm_set_key;
12121 	ic->ic_delete_key = iwm_delete_key;
12122 
12123 	/* Override 802.11 state transition machine. */
12124 	sc->sc_newstate = ic->ic_newstate;
12125 	ic->ic_newstate = iwm_newstate;
12126 	ic->ic_updateprot = iwm_updateprot;
12127 	ic->ic_updateslot = iwm_updateslot;
12128 	ic->ic_updateedca = iwm_updateedca;
12129 	ic->ic_updatechan = iwm_updatechan;
12130 	ic->ic_updatedtim = iwm_updatedtim;
12131 	ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
12132 	ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
12133 	ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
12134 	ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
12135 #ifdef __HAIKU__
12136 	iwm_preinit(sc);
12137 #else
12138 	/*
12139 	 * We cannot read the MAC address without loading the
12140 	 * firmware from disk. Postpone until mountroot is done.
12141 	 */
12142 	config_mountroot(self, iwm_attach_hook);
12143 #endif
12144 
12145 	return 0;
12146 
12147 fail4:	while (--txq_i >= 0)
12148 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
12149 	iwm_free_rx_ring(sc, &sc->rxq);
12150 	iwm_dma_contig_free(&sc->sched_dma);
12151 fail3:	if (sc->ict_dma.vaddr != NULL)
12152 		iwm_dma_contig_free(&sc->ict_dma);
12153 
12154 fail2:	iwm_dma_contig_free(&sc->kw_dma);
12155 fail1:	iwm_dma_contig_free(&sc->fw_dma);
12156 #ifdef __HAIKU__
12157 fail:
12158 	if_free_inplace(ifp);
12159 #endif
12160 	return -1;
12161 }
12162 
12163 #if NBPFILTER > 0
12164 void
12165 iwm_radiotap_attach(struct iwm_softc *sc)
12166 {
12167 	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
12168 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
12169 
12170 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
12171 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
12172 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
12173 
12174 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
12175 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
12176 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
12177 }
12178 #endif
12179 
12180 void
12181 iwm_init_task(void *arg1)
12182 {
12183 	struct iwm_softc *sc = arg1;
12184 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12185 	int s = splnet();
12186 	int generation = sc->sc_generation;
12187 	int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
12188 
12189 	rw_enter_write(&sc->ioctl_rwl);
12190 	if (generation != sc->sc_generation) {
12191 		rw_exit(&sc->ioctl_rwl);
12192 		splx(s);
12193 		return;
12194 	}
12195 
12196 	if (ifp->if_flags & IFF_RUNNING)
12197 		iwm_stop(ifp);
12198 	else
12199 		sc->sc_flags &= ~IWM_FLAG_HW_ERR;
12200 
12201 	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
12202 		iwm_init(ifp);
12203 
12204 	rw_exit(&sc->ioctl_rwl);
12205 	splx(s);
12206 }
12207 
12208 void
12209 iwm_resume(struct iwm_softc *sc)
12210 {
12211 	pcireg_t reg;
12212 
12213 	/*
12214 	 * We disable the RETRY_TIMEOUT register (0x41) to keep
12215 	 * PCI Tx retries from interfering with C3 CPU state.
12216 	 */
12217 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
12218 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
12219 
12220 	if (!sc->sc_msix) {
12221 		/* Hardware bug workaround. */
12222 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
12223 		    PCI_COMMAND_STATUS_REG);
12224 		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
12225 			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
12226 		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
12227 		    PCI_COMMAND_STATUS_REG, reg);
12228 	}
12229 
12230 	iwm_disable_interrupts(sc);
12231 }
12232 
12233 int
12234 iwm_wakeup(struct iwm_softc *sc)
12235 {
12236 	struct ieee80211com *ic = &sc->sc_ic;
12237 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12238 	int err;
12239 
12240 	err = iwm_start_hw(sc);
12241 	if (err)
12242 		return err;
12243 
12244 	err = iwm_init_hw(sc);
12245 	if (err)
12246 		return err;
12247 
12248 	refcnt_init(&sc->task_refs);
12249 	ifq_clr_oactive(&ifp->if_snd);
12250 	ifp->if_flags |= IFF_RUNNING;
12251 
12252 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
12253 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
12254 	else
12255 		ieee80211_begin_scan(ifp);
12256 
12257 	return 0;
12258 }
12259 
12260 #ifdef __FreeBSD_version
12261 static device_method_t iwm_pci_methods[] = {
12262 	/* Device interface */
12263 	DEVMETHOD(device_probe,         iwm_probe),
12264 	DEVMETHOD(device_attach,        iwm_attach),
12265 #if 0
12266 	DEVMETHOD(device_detach,        iwm_detach),
12267 	DEVMETHOD(device_suspend,       iwm_suspend),
12268 	DEVMETHOD(device_resume,        iwm_resume),
12269 #endif
12270 
12271 	DEVMETHOD_END
12272 };
12273 
12274 static driver_t iwm_pci_driver = {
12275 	"iwm",
12276 	iwm_pci_methods,
12277 	sizeof (struct iwm_softc)
12278 };
12279 
12280 DRIVER_MODULE(iwm, pci, iwm_pci_driver, NULL, NULL);
12281 #else
12282 int
12283 iwm_activate(struct device *self, int act)
12284 {
12285 	struct iwm_softc *sc = (struct iwm_softc *)self;
12286 	struct ifnet *ifp = &sc->sc_ic.ic_if;
12287 	int err = 0;
12288 
12289 	switch (act) {
12290 	case DVACT_QUIESCE:
12291 		if (ifp->if_flags & IFF_RUNNING) {
12292 			rw_enter_write(&sc->ioctl_rwl);
12293 			iwm_stop(ifp);
12294 			rw_exit(&sc->ioctl_rwl);
12295 		}
12296 		break;
12297 	case DVACT_RESUME:
12298 		iwm_resume(sc);
12299 		break;
12300 	case DVACT_WAKEUP:
12301 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
12302 			err = iwm_wakeup(sc);
12303 			if (err)
12304 				printf("%s: could not initialize hardware\n",
12305 				    DEVNAME(sc));
12306 		}
12307 		break;
12308 	}
12309 
12310 	return 0;
12311 }
12312 
12313 struct cfdriver iwm_cd = {
12314 	NULL, "iwm", DV_IFNET
12315 };
12316 
12317 const struct cfattach iwm_ca = {
12318 	sizeof(struct iwm_softc), iwm_match, iwm_attach,
12319 	NULL, iwm_activate
12320 };
12321 #endif
12322